//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
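//   (read2 offsets are encoded in units of the element size, here 4 bytes,
//   so byte offsets 16 and 32 become offset0:4 and offset1:8.)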
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add to the base
//   pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
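  // Bookkeeping for one merge candidate: the two instructions being combined
  // (I and Paired), the element size in bytes, the instructions' immediate
  // offsets (re-encoded by offsetsCanBeCombined()), an optional byte offset
  // added to the base address, whether the stride-64 (ST64) forms are used,
  // and the instructions that must sink below the merged instruction.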
  using CombineInfo = struct {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
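  // For example, with EltSize == 4, byte offsets 0 and 8192 give element
  // offsets 0 and 2048; 2048 doesn't fit in 8 bits, but both are multiples
  // of 64, so the ST64 forms can encode them as 0 and 32.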
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);
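  // For example, element offsets 257 and 258 don't fit in 8 bits, but their
  // difference does; rebasing on the smaller byte offset leaves encodable
  // offsets of 0 and 1.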

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                           AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);

  // We only ever merge operations with the same base address register, so don't
  // bother scanning forward if there are no other uses.
  if (MRI->hasOneNonDBGUse(AddrReg0.getReg()))
    return false;

  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction; any uses of I will need to
      // be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatile or otherwise ordered memory accesses.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check same base pointer. Be careful of subregisters, which can occur with
    // vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
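  // The merged read2 defines one 64-bit (b32 pair) or 128-bit (b64 pair)
  // super-register; the original destinations are recreated below with
  // subregister copies out of it.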

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
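  // If offsetsCanBeCombined() rebased the offsets, materialize the new base
  // address with a VALU add; the encoded offsets are relative to it.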
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() (and not .addReg()) when copying these operands to
  // the new instruction, so we preserve the subregister index and any register
  // flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
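  // As in mergeRead2Pair(), materialize the rebased base address if the
  // offsets were shifted.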
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine volatile or otherwise ordered memory accesses.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}