//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// It will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote a constant offset to the immediate by
// adjusting the base. It uses a base from a nearby instruction that allows
// the access to have a 13-bit constant offset, and then promotes that 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but the loads are close enough to each other, we can add
//   to the base pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};

enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  unsigned getOpcodeWidth(const MachineInstr &MI);
  InstClassEnum getInstClass(unsigned Opc);
  unsigned getRegs(unsigned Opc);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset);
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &MI, MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &AnchorList);

198 static char ID;
199
Francis Visoiu Mistrih8b617642017-05-18 17:21:13 +0000200 SILoadStoreOptimizer() : MachineFunctionPass(ID) {
Matt Arsenault41033282014-10-10 22:01:59 +0000201 initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
202 }
203
204 bool optimizeBlock(MachineBasicBlock &MBB);
205
206 bool runOnMachineFunction(MachineFunction &MF) override;
207
Mark Searles7687d422018-01-22 21:46:43 +0000208 StringRef getPassName() const override { return "SI Load Store Optimizer"; }
Matt Arsenault41033282014-10-10 22:01:59 +0000209
210 void getAnalysisUsage(AnalysisUsage &AU) const override {
211 AU.setPreservesCFG();
Tom Stellardc2ff0eb2016-08-29 19:15:22 +0000212 AU.addRequired<AAResultsWrapperPass>();
Matt Arsenault41033282014-10-10 22:01:59 +0000213
214 MachineFunctionPass::getAnalysisUsage(AU);
215 }
216};
217
Eugene Zelenko66203762017-01-21 00:53:49 +0000218} // end anonymous namespace.
Matt Arsenault41033282014-10-10 22:01:59 +0000219
INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

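// Move every instruction in InstsToMove to the position immediately after I,
// preserving their relative order.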
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

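// Record every register defined by MI, and every physical register it reads,
// in the given sets.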
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

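// Check that it is safe to move the instructions in InstsToMove down past
// MemOp: every load or store among them must be trivially reorderable with
// MemOp. Non-memory instructions are ignored here.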
static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    const SIInstrInfo *TII, AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

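// Decide whether the two offsets in CI can be encoded by a single merged
// instruction, rewriting CI's offsets (and setting BaseOff/UseST64 for the
// DS forms) as needed.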
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

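// Check that the combined width of the pair is one the merged opcode can
// actually encode.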
bool SILoadStoreOptimizer::widthsFit(const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return Width <= 4;
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

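// Return the width of MI's data in dwords.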
unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();

  if (TII->isMUBUF(MI)) {
    return AMDGPU::getMUBUFDwords(Opc);
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  }
}

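// Map an opcode onto one of the mergeable instruction classes, or UNKNOWN.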
InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    default:
      return UNKNOWN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    }
  }

  switch (Opc) {
  default:
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

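// Return a bitmask of the address operand kinds (ADDR, SBASE, SRSRC, ...)
// used by the given opcode.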
unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

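// Scan forward from CI.I for a second instruction that can be merged with it.
// On success, fill in CI.Paired and the offset/width fields, and collect in
// CI.InstsToMove the instructions that must be moved below the merge point.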
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc);

  if (InstClass == UNKNOWN) {
    return false;
  }

  const unsigned Regs = getRegs(Opc);

  unsigned AddrOpName[5] = {0};
  int AddrIdx[5];
  const MachineOperand *AddrReg[5];
  unsigned NumAddresses = 0;

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another load/store instruction, we will be
      // moving I down to the location of the matched instruction, so any
      // uses of I will need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx =
          AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Width0 = getOpcodeWidth(*CI.I);
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Width1 = getOpcodeWidth(*MBBI);
      CI.Paired = MBBI;

      if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

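// Select the read2/read2st64 opcode for the given element size; subtargets
// that do not require M0 initialization use the gfx9 variants.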
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

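// Replace CI.I and CI.Paired with a single ds_read2/ds_read2st64 into a fresh
// super-register, then copy each half back to the original destinations.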
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg);
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

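// Opcode selection for write2/write2st64, mirroring the read2 helpers above.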
unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

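// Replace CI.I and CI.Paired with a single ds_write2/ds_write2st64 that takes
// both data operands.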
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg);
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

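// Merge two s_buffer_load_dword* instructions into one of the combined width,
// then copy each half of the result back to the original destinations.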
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

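// Merge two MUBUF loads into one wider buffer load, then copy each part of
// the result back to the original destinations.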
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Allocate the new, wider destination register.
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

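// Return the opcode of the merged instruction for the combined width.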
unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}

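// Return the subregister indices the two original results occupy within the
// merged register; the instruction with the lower offset gets the low part.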
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}

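// Return a register class wide enough to hold the merged result.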
const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

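// Merge two MUBUF stores: combine the two data operands with a REG_SEQUENCE
// and emit a single buffer store of the combined width.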
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

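// Return Val as an immediate operand if it is an inline constant; otherwise
// materialize it with an S_MOV_B32 and return the new register.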
MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
  unsigned CarryReg = MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
  unsigned DeadCarryReg =
      MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);

  unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo);
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill);
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) {
  TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

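// Return the constant behind Op: either an immediate operand, or the
// immediate moved into it by a unique S_MOV_B32 def.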
Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

1199// Analyze Base and extracts:
1200// - 32bit base registers, subregisters
1201// - 64bit constant offset
1202// Expecting base computation as:
1203// %OFFSET0:sgpr_32 = S_MOV_B32 8000
1204// %LO:vgpr_32, %c:sreg_64_xexec =
1205// V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
1206// %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1207// %Base:vreg_64 =
1208// REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1209void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1210 MemAddress &Addr) {
1211 if (!Base.isReg())
1212 return;
1213
1214 MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1215 if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1216 || Def->getNumOperands() != 5)
1217 return;
1218
1219 MachineOperand BaseLo = Def->getOperand(1);
1220 MachineOperand BaseHi = Def->getOperand(3);
1221 if (!BaseLo.isReg() || !BaseHi.isReg())
1222 return;
1223
1224 MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1225 MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1226
1227 if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
1228 !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1229 return;
1230
1231 const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1232 const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1233
1234 auto Offset0P = extractConstOffset(*Src0);
1235 if (Offset0P)
1236 BaseLo = *Src1;
1237 else {
1238 if (!(Offset0P = extractConstOffset(*Src1)))
1239 return;
1240 BaseLo = *Src0;
1241 }
1242
1243 Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1244 Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1245
1246 if (Src0->isImm())
1247 std::swap(Src0, Src1);
1248
1249 if (!Src1->isImm())
1250 return;
1251
1252 assert(isInt<32>(*Offset0P) && isInt<32>(Src1->getImm())
1253 && "Expected 32bit immediate!!!");
1254 uint64_t Offset1 = Src1->getImm();
1255 BaseHi = *Src0;
1256
1257 Addr.Base.LoReg = BaseLo.getReg();
1258 Addr.Base.HiReg = BaseHi.getReg();
1259 Addr.Base.LoSubReg = BaseLo.getSubReg();
1260 Addr.Base.HiSubReg = BaseHi.getSubReg();
1261 Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1262}
1263
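// Try to fold MI's constant address offset into its immediate offset field by
// rebasing it on a nearby "anchor" instruction that shares the same base
// registers. Returns true if MI (and possibly other instructions with the
// same base) was rewritten.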
bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) {

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  // TODO: Support stores.
  if (!MI.mayLoad())
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
    return false;
  }

  // Step 1: Find the base registers and the 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
             << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step 2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) at the highest 13-bit
  // distance from MI's offset.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;  load1 = load(addr1, 0)
  //   addr2 = &a + 6144;  load2 = load(addr2, 0)
  //   addr3 = &a + 8192;  load3 = load(addr3, 0)
  //   addr4 = &a + 10240; load4 = load(addr4, 0)
  //   addr5 = &a + 12288; load5 = load(addr5, 0)
  //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192 as
  // the new base (anchor) because the maximum distance can presumably
  // accommodate more intermediate bases.
  //
  // Step 3: Move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2 and load4.
  //   addr = &a + 8192
  //   load1 = load(addr, -4096)
  //   load2 = load(addr, -2048)
  //   load3 = load(addr, 0)
  //   load4 = load(addr, 2048)
  //   addr5 = &a + 12288; load5 = load(addr5, 0)
  //
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

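  // If an anchor was found, rebase MI on it, then re-point every other
  // instruction sharing the base whose distance from the anchor is still a
  // legal addressing-mode immediate.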
  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
               << AnchorAddr.Offset << "\n\n");

    // Instead of moving the anchor instruction up, just recompute its base
    // address at MI.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Maps instructions already analyzed to their decomposed base registers and
  // constant offset.
  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the immediate.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

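  // For each instruction: first try to promote its constant address offset
  // into the immediate offset field, then try to pair it with a matching
  // neighbour into a wider access.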
  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

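    // Classify the instruction. EltSize is the granule in which the per-class
    // merge logic interprets offsets; e.g. ds_read2_b32 encodes its two
    // offsets in 4-byte units, matching the EltSize of 4 used for DS_READ.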
    const unsigned Opc = MI.getOpcode();

    CombineInfo CI;
    CI.I = I;
    CI.InstClass = getInstClass(Opc);

    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      CI.EltSize =
          (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                          : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case DS_WRITE:
      CI.EltSize =
          (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                            : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case S_BUFFER_LOAD_IMM:
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        // s_buffer_load can go as wide as dwordx16; keep iterating until the
        // merged width saturates.
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
      } else {
        ++I;
      }
      continue;
    case BUFFER_LOAD_OFFEN:
    case BUFFER_LOAD_OFFSET:
    case BUFFER_LOAD_OFFEN_exact:
    case BUFFER_LOAD_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    case BUFFER_STORE_OFFEN:
    case BUFFER_STORE_OFFSET:
    case BUFFER_STORE_OFFEN_exact:
    case BUFFER_STORE_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

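  // A successful merge can itself create a new merge candidate (e.g. two
  // dwordx2 results can later combine into a dwordx4), so reprocess each
  // block until it reaches a fixed point.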
  for (MachineBasicBlock &MBB : MF) {
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MBB);
    } while (OptimizeAgain);
  }

  return Modified;
}