//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with close by immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputation seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

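  // State for one candidate pair: the two instructions being merged, their
  // decoded offsets and cache policy bits, and the instructions that must be
  // moved below the merged instruction to keep the block correct.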
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

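// Record the registers MI defines in RegDefs, and the physical registers it
// reads in PhysRegUses. Reads of virtual registers don't need tracking here
// because the function runs on SSA form.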
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
    TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &RegDefs,
                      DenseSet<unsigned> &PhysRegUses,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

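  // For SMEM/VMEM, "adjacent" means the dword offsets differ by exactly the
  // width of one access: 1 dword for the _DWORD forms, 2 dwords for the
  // _DWORDX2 forms.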
  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8-bits, we might be able to use
  // the stride 64 versions.
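  // For example, ds_read_b32 at byte offsets 512 and 768 has element offsets
  // 128 and 192; both are multiples of 64, so the pair encodes as
  // ds_read2st64_b32 offset0:2 offset1:3.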
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
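  // For example, element offsets 257 and 258 are too large for 8 bits, but
  // their difference is 1, so rebasing to the smaller byte offset lets the
  // pair encode as offset0:0 offset1:1 from the adjusted base register.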
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another load/store instruction, we will be
      // moving I down to the location of the matched instruction, so any
      // uses of I will need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

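// Pre-GFX9 LDS instructions require M0 to be initialized, while GFX9 added
// _gfx9 variants that do not read M0, so opcode selection below depends on
// ldsRequiresM0Init().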
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
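  // If offsetsCanBeCombined() rebased the pair, materialize the base offset
  // in an SGPR and add it to the original address to form the new base.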
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9 : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

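// Merge a pair of s_buffer_load_dword / s_buffer_load_dwordx2 into the next
// wider form, then split the result back into the original destination
// registers with subregister copies.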
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest1)
      .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

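// The VMEM analogue of mergeSBufferLoadImmPair(): widen the pair of buffer
// loads and copy the two halves back to the original destinations.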
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest1)
      .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

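// Map a buffer store opcode to its double-width form, reporting whether the
// input was already x2 and whether it uses the OFFEN addressing mode. A
// return value of 0 means the opcode is not a mergeable buffer store; the
// loop in optimizeBlock() also uses this as its "is mergeable" predicate.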
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
  const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent LDS, SMEM, or VMEM operations with
// constant offsets from the same base register. We rely on the scheduler to
// do the hard work of clustering nearby loads, and assume these are all
// adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {

      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize
        = (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
        Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}