//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but close enough together to fit after subtracting a common
//   base, we can add to the base pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  enum InstClassEnum {
    DS_READ_WRITE,
    S_BUFFER_LOAD_IMM,
    BUFFER_LOAD_OFFEN,
    BUFFER_LOAD_OFFSET,
    BUFFER_STORE_OFFEN,
    BUFFER_STORE_OFFSET,
  };

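  // Bookkeeping for one candidate pair: the instruction being considered (I),
  // its prospective partner (Paired), the decoded offsets and cache policy
  // bits, and any instructions that must be moved below the merge point.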
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool UseST64;
    bool IsX2;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SISubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  unsigned CreatedX2;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  unsigned promoteBufferStoreOpcode(const MachineInstr &I, bool &IsX2,
                                    bool &IsOffen) const;
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

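// Move all instructions in InstsToMove, in order, to immediately after I.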
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

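// Record MI's register defs in RegDefs and its reads of physical registers in
// PhysRegUses, so that later instructions depending on them can be detected.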
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) ||
         TII->areMemAccessesTriviallyDisjoint(*A, *B, AA);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      DenseSet<unsigned> &RegDefs,
                      DenseSet<unsigned> &PhysRegUses,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

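// Check that it is safe to move the memory operations in InstsToMove across
// MemOp, i.e. that none of them alias with it.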
static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

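// Decide whether the offsets of CI.I and CI.Paired can be encoded in a single
// merged instruction, rewriting CI.Offset0/CI.Offset1 (and BaseOff/UseST64 for
// DS) into the encodable form. For example, two 4-byte DS reads at byte
// offsets 16 and 32 become element offsets 4 and 8, as in the example at the
// top of the file.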
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if (CI.InstClass != DS_READ_WRITE) {
    unsigned Diff = CI.IsX2 ? 2 : 1;
    return (EltOffset0 + Diff == EltOffset1 ||
            EltOffset1 + Diff == EltOffset0) &&
           CI.GLC0 == CI.GLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

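// Scan forward from CI.I for an instruction with the same opcode and the same
// base address whose offset can be combined with CI.I's. On success, fill in
// CI.Paired and the offset fields and return true. Instructions encountered
// along the way that must sink below the merge point are collected in
// CI.InstsToMove.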
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  unsigned AddrOpName[3] = {0};
  int AddrIdx[3];
  const MachineOperand *AddrReg[3];
  unsigned NumAddresses = 0;

  switch (CI.InstClass) {
  case DS_READ_WRITE:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
    break;
  case S_BUFFER_LOAD_IMM:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
    break;
  case BUFFER_LOAD_OFFEN:
  case BUFFER_STORE_OFFEN:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  case BUFFER_LOAD_OFFSET:
  case BUFFER_STORE_OFFSET:
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
    break;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another memory instruction, we will be moving I
      // down to the location of the matched instruction, so any uses of I
      // will need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Paired = MBBI;

      if (CI.InstClass == DS_READ_WRITE) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9
                        : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_READ2ST64_B32_gfx9 : AMDGPU::DS_READ2ST64_B64_gfx9;
}

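// Replace CI.I and CI.Paired with a single ds_read2 (or ds_read2st64), loading
// into a fresh 64- or 128-bit super-register, then copy the two halves back
// into the original destination registers. If offsetsCanBeCombined() chose a
// nonzero BaseOff, materialize it and add it to the address first.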
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
        .addReg(BaseReg, BaseRegFlags) // addr
        .addImm(NewOffset0)            // offset0
        .addImm(NewOffset1)            // offset1
        .addImm(0)                     // gds
        .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ?
    AMDGPU::DS_WRITE2ST64_B32_gfx9 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

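// Replace CI.I and CI.Paired with a single ds_write2 (or ds_write2st64) that
// stores both data operands, materializing an adjusted base address first if
// offsetsCanBeCombined() chose a nonzero BaseOff.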
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg() with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
    TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
    TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
    TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = CI.UseST64 ?
    write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
      .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
      .addReg(ImmReg)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
        .addReg(BaseReg, BaseRegFlags) // addr
        .add(*Data0)                   // data0
        .add(*Data1)                   // data1
        .addImm(NewOffset0)            // offset0
        .addImm(NewOffset1)            // offset1
        .addImm(0)                     // gds
        .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

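// Merge two s_buffer_load_dword (or s_buffer_load_dwordx2) instructions into
// the next wider load at the smaller offset, then copy the subregisters back
// to the original destinations, as in the example at the top of the file.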
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode = CI.IsX2 ? AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM :
                              AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::SReg_128RegClass : &AMDGPU::SReg_64_XEXECRegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

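// The same idea for buffer_load_dword (or buffer_load_dwordx2) in the OFFEN
// and OFFSET forms: emit the wider load at min(Offset0, Offset1) and copy the
// halves back to the original vdata destinations.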
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  unsigned Opcode;

  if (CI.InstClass == BUFFER_LOAD_OFFEN) {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
  } else {
    Opcode = CI.IsX2 ? AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET :
                       AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  }

  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  if (CI.InstClass == BUFFER_LOAD_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

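// Map a buffer-store opcode to its double-width counterpart, reporting whether
// the input is already an x2 store and whether it is the OFFEN form. Returns 0
// for opcodes this pass does not handle.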
unsigned SILoadStoreOptimizer::promoteBufferStoreOpcode(
    const MachineInstr &I, bool &IsX2, bool &IsOffen) const {
  IsX2 = false;
  IsOffen = false;

  switch (I.getOpcode()) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN_exact:
    IsX2 = true;
    IsOffen = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN_exact;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET_exact:
    IsX2 = true;
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET_exact;
  }
  return 0;
}

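// Merge two buffer stores by combining the data operands with a REG_SEQUENCE
// and emitting the promoted store at the smaller offset. An illustrative
// example, assuming both stores share srsrc, soffset, and glc/slc bits:
//   buffer_store_dword v0, off, s[0:3], 0 offset:4
//   buffer_store_dword v1, off, s[0:3], 0 offset:8
// ==>
//   buffer_store_dwordx2 v[0:1], off, s[0:3], 0 offset:4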
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
    CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  bool Unused1, Unused2;
  unsigned Opcode = promoteBufferStoreOpcode(*CI.I, Unused1, Unused2);

  unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;

  // Handle descending offsets.
  if (CI.Offset0 > CI.Offset1)
    std::swap(SubRegIdx0, SubRegIdx1);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC =
    CI.IsX2 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass;
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  if (CI.InstClass == BUFFER_STORE_OFFEN)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

// Scan through looking for adjacent memory operations with constant offsets
// from the same base register. We rely on the scheduler to do the hard work
// of clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64 ||
        Opc == AMDGPU::DS_READ_B32_gfx9 || Opc == AMDGPU::DS_READ_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8 : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64 ||
               Opc == AMDGPU::DS_WRITE_B32_gfx9 ||
               Opc == AMDGPU::DS_WRITE_B64_gfx9) {
      CI.InstClass = DS_READ_WRITE;
      CI.EltSize =
        (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                          : 4;

      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }
    if (Opc == AMDGPU::S_BUFFER_LOAD_DWORD_IMM ||
        Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM) {
      // EltSize is in units of the offset encoding.
      CI.InstClass = S_BUFFER_LOAD_IMM;
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      CI.IsX2 = Opc == AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }
    if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
        Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFSET ||
        Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET) {
      if (Opc == AMDGPU::BUFFER_LOAD_DWORD_OFFEN ||
          Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN)
        CI.InstClass = BUFFER_LOAD_OFFEN;
      else
        CI.InstClass = BUFFER_LOAD_OFFSET;

      CI.EltSize = 4;
      CI.IsX2 = Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN ||
                Opc == AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    bool StoreIsX2, IsOffen;
    if (promoteBufferStoreOpcode(*I, StoreIsX2, IsOffen)) {
      CI.InstClass = IsOffen ? BUFFER_STORE_OFFEN : BUFFER_STORE_OFFSET;
      CI.EltSize = 4;
      CI.IsX2 = StoreIsX2;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        if (!CI.IsX2)
          CreatedX2++;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<SISubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    CreatedX2 = 0;
    Modified |= optimizeBlock(MBB);

    // Run again to convert x2 to x4.
    if (CreatedX2 >= 1)
      Modified |= optimizeBlock(MBB);
  }

  return Modified;
}