//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPU.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"

using namespace llvm;

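// Registers the R600 and GCN target machines and initializes the AMDGPU
// codegen passes with the pass registry when the backend is loaded.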
extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
}

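// Both the R600 and GCN targets share the same target object file
// implementation.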
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "r600";
}

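// The AMDGPU backend defaults to PIC when no relocation model is explicitly
// requested.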
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    return Reloc::PIC_;
  return *RM;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this), IntrinsicInfo() {
  setRequiresStructuredCFG(true);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() { }

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {

    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

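  // Pick a machine scheduler: R600's custom scheduler for pre-GCN subtargets,
  // the SI scheduler when it is enabled, and the default scheduler otherwise.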
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    else if (ST.enableSIScheduler())
      return createSIMachineScheduler(C);
    return nullptr;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // End of anonymous namespace

TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}

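// Run full GVN only at the most aggressive optimization level; fall back to
// the cheaper EarlyCSE otherwise.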
void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
  if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(&TM));
    addPass(createSROAPass());
  }

  addStraightLineScalarOptimizationPasses();

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

bool
AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

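// GlobalISel support is still minimal: the IR translator is wired up, but
// addRegBankSelect does not schedule a pass yet.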
#ifdef LLVM_BUILD_GLOBAL_ISEL
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  return false;
}
#endif

void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also needs extra copies to the address operand to be eliminated.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
  addPass(createSIWholeQuadModePass());
}

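// No GCN-specific register allocator tweaks yet; defer to the default fast
// and optimized register allocation pipelines.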
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because
  // if there are multiple scheduling regions in a basic block, the regions
  // are scheduled bottom up, so when we begin to schedule a region we don't
  // know what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(createSIInsertWaitsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSILowerControlFlowPass(), false);
  addPass(createSIDebuggerInsertNopsPass(), false);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}