blob: bc436d7ae42e417b22d5d35d87bd16044e0217dc [file] [log] [blame]
//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

16#include "AMDGPUTargetMachine.h"
17#include "AMDGPU.h"
Matt Arsenaulteb9025d2016-06-28 17:42:09 +000018#include "AMDGPUCallLowering.h"
19#include "AMDGPUTargetObjectFile.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "AMDGPUTargetTransformInfo.h"
Tom Stellard0d23ebe2016-08-29 19:42:52 +000021#include "GCNSchedStrategy.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000022#include "R600ISelLowering.h"
23#include "R600InstrInfo.h"
24#include "R600MachineScheduler.h"
25#include "SIISelLowering.h"
26#include "SIInstrInfo.h"
Matt Arsenault2ffe8fd2016-08-11 19:18:50 +000027#include "SIMachineScheduler.h"
Tom Stellard000c5af2016-04-14 19:09:28 +000028#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/CodeGen/Passes.h"
Matthias Braun31d19d42016-05-10 03:21:59 +000030#include "llvm/CodeGen/TargetPassConfig.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000031#include "llvm/Support/TargetRegistry.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/Transforms/IPO.h"
Chandler Carruth67fc52f2016-08-17 02:56:20 +000033#include "llvm/Transforms/IPO/AlwaysInliner.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/Transforms/Scalar.h"
Matt Arsenaultf42c6922016-06-15 00:11:01 +000035#include "llvm/Transforms/Scalar/GVN.h"
Matt Arsenault908b9e22016-07-01 03:33:52 +000036#include "llvm/Transforms/Vectorize.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037
38using namespace llvm;
39
Matt Arsenaultc5816112016-06-24 06:30:22 +000040static cl::opt<bool> EnableR600StructurizeCFG(
41 "r600-ir-structurize",
42 cl::desc("Use StructurizeCFG IR pass"),
43 cl::init(true));
44
Matt Arsenault03d85842016-06-27 20:32:13 +000045static cl::opt<bool> EnableSROA(
46 "amdgpu-sroa",
47 cl::desc("Run SROA after promote alloca pass"),
48 cl::ReallyHidden,
49 cl::init(true));
50
51static cl::opt<bool> EnableR600IfConvert(
52 "r600-if-convert",
53 cl::desc("Use if conversion pass"),
54 cl::ReallyHidden,
55 cl::init(true));
56
Matt Arsenault908b9e22016-07-01 03:33:52 +000057// Option to disable vectorizer for tests.
58static cl::opt<bool> EnableLoadStoreVectorizer(
59 "amdgpu-load-store-vectorizer",
60 cl::desc("Enable load store vectorizer"),
Matt Arsenault0efdd062016-09-09 22:29:28 +000061 cl::init(true),
Matt Arsenault908b9e22016-07-01 03:33:52 +000062 cl::Hidden);
63
Tom Stellard45bb48e2015-06-13 03:28:10 +000064extern "C" void LLVMInitializeAMDGPUTarget() {
65 // Register the target
66 RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
67 RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
Matt Arsenaultb87fc222015-10-01 22:10:03 +000068
69 PassRegistry *PR = PassRegistry::getPassRegistry();
Matt Arsenault8c0ef8b2015-10-12 17:43:59 +000070 initializeSILowerI1CopiesPass(*PR);
Matt Arsenault782c03b2015-11-03 22:30:13 +000071 initializeSIFixSGPRCopiesPass(*PR);
Matt Arsenault8c0ef8b2015-10-12 17:43:59 +000072 initializeSIFoldOperandsPass(*PR);
Matt Arsenaultc3a01ec2016-06-09 23:18:47 +000073 initializeSIShrinkInstructionsPass(*PR);
Matt Arsenault187276f2015-10-07 00:42:53 +000074 initializeSIFixControlFlowLiveIntervalsPass(*PR);
75 initializeSILoadStoreOptimizerPass(*PR);
Matt Arsenault39319482015-11-06 18:01:57 +000076 initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
Tom Stellarda6f24c62015-12-15 20:55:55 +000077 initializeAMDGPUAnnotateUniformValuesPass(*PR);
Matt Arsenaulte0132462016-01-30 05:19:45 +000078 initializeAMDGPUPromoteAllocaPass(*PR);
Matt Arsenault86de4862016-06-24 07:07:55 +000079 initializeAMDGPUCodeGenPreparePass(*PR);
Tom Stellard77a17772016-01-20 15:48:27 +000080 initializeSIAnnotateControlFlowPass(*PR);
Tom Stellard6e1967e2016-02-05 17:42:38 +000081 initializeSIInsertWaitsPass(*PR);
Nicolai Haehnle213e87f2016-03-21 20:28:33 +000082 initializeSIWholeQuadModePass(*PR);
Matt Arsenault55d49cf2016-02-12 02:16:10 +000083 initializeSILowerControlFlowPass(*PR);
Matt Arsenault78fc9da2016-08-22 19:33:16 +000084 initializeSIInsertSkipsPass(*PR);
Matt Arsenaultd3e4c642016-06-02 00:04:22 +000085 initializeSIDebuggerInsertNopsPass(*PR);
Matt Arsenaulte6740752016-09-29 01:44:16 +000086 initializeSIOptimizeExecMaskingPass(*PR);
Tom Stellard45bb48e2015-06-13 03:28:10 +000087}
88
Tom Stellarde135ffd2015-09-25 21:41:28 +000089static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
Tom Stellardc93fc112015-12-10 02:13:01 +000090 return make_unique<AMDGPUTargetObjectFile>();
Tom Stellarde135ffd2015-09-25 21:41:28 +000091}
92
Tom Stellard45bb48e2015-06-13 03:28:10 +000093static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
94 return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
95}
96
Matt Arsenault2ffe8fd2016-08-11 19:18:50 +000097static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
98 return new SIScheduleDAGMI(C);
99}
100
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000101static ScheduleDAGInstrs *
102createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
103 ScheduleDAGMILive *DAG =
104 new ScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
105 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
106 return DAG;
107}
108
Tom Stellard45bb48e2015-06-13 03:28:10 +0000109static MachineSchedRegistry
Nicolai Haehnle02c32912016-01-13 16:10:10 +0000110R600SchedRegistry("r600", "Run R600's custom scheduler",
111 createR600MachineScheduler);
112
113static MachineSchedRegistry
114SISchedRegistry("si", "Run SI's custom scheduler",
115 createSIMachineScheduler);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000116
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000117static MachineSchedRegistry
118GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
119 "Run GCN scheduler to maximize occupancy",
120 createGCNMaxOccupancyMachineScheduler);
121
Matt Arsenaultec30eb52016-05-31 16:57:45 +0000122static StringRef computeDataLayout(const Triple &TT) {
123 if (TT.getArch() == Triple::r600) {
124 // 32-bit pointers.
125 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
126 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 }
128
Matt Arsenaultec30eb52016-05-31 16:57:45 +0000129 // 32-bit private, local, and region pointers. 64-bit global, constant and
130 // flat.
131 return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
132 "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
133 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134}
135
Matt Arsenaultb22828f2016-01-27 02:17:49 +0000136LLVM_READNONE
137static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
138 if (!GPU.empty())
139 return GPU;
140
141 // HSA only supports CI+, so change the default GPU to a CI for HSA.
142 if (TT.getArch() == Triple::amdgcn)
143 return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
144
Matt Arsenault8e001942016-06-02 18:37:16 +0000145 return "r600";
Matt Arsenaultb22828f2016-01-27 02:17:49 +0000146}
147
Rafael Espindola8c34dd82016-05-18 22:04:49 +0000148static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
Tom Stellard418beb72016-07-13 14:23:33 +0000149 // The AMDGPU toolchain only supports generating shared objects, so we
150 // must always use PIC.
151 return Reloc::PIC_;
Rafael Espindola8c34dd82016-05-18 22:04:49 +0000152}
153
Tom Stellard45bb48e2015-06-13 03:28:10 +0000154AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
155 StringRef CPU, StringRef FS,
Rafael Espindola8c34dd82016-05-18 22:04:49 +0000156 TargetOptions Options,
157 Optional<Reloc::Model> RM,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000158 CodeModel::Model CM,
159 CodeGenOpt::Level OptLevel)
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000160 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
161 FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
162 TLOF(createTLOF(getTargetTriple())),
163 IntrinsicInfo() {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000164 setRequiresStructuredCFG(true);
165 initAsmInfo();
166}
167
Tom Stellarde135ffd2015-09-25 21:41:28 +0000168AMDGPUTargetMachine::~AMDGPUTargetMachine() { }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000169
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000170StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
171 Attribute GPUAttr = F.getFnAttribute("target-cpu");
172 return GPUAttr.hasAttribute(Attribute::None) ?
173 getTargetCPU() : GPUAttr.getValueAsString();
174}
175
176StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
177 Attribute FSAttr = F.getFnAttribute("target-features");
178
179 return FSAttr.hasAttribute(Attribute::None) ?
180 getTargetFeatureString() :
181 FSAttr.getValueAsString();
182}
183
Tom Stellard45bb48e2015-06-13 03:28:10 +0000184//===----------------------------------------------------------------------===//
185// R600 Target Machine (R600 -> Cayman)
186//===----------------------------------------------------------------------===//
187
188R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
Tom Stellard5dde1d22016-02-05 18:29:17 +0000189 StringRef CPU, StringRef FS,
Rafael Espindola8c34dd82016-05-18 22:04:49 +0000190 TargetOptions Options,
191 Optional<Reloc::Model> RM,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000192 CodeModel::Model CM, CodeGenOpt::Level OL)
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000193 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
194
195const R600Subtarget *R600TargetMachine::getSubtargetImpl(
196 const Function &F) const {
197 StringRef GPU = getGPUName(F);
198 StringRef FS = getFeatureString(F);
199
200 SmallString<128> SubtargetKey(GPU);
201 SubtargetKey.append(FS);
202
203 auto &I = SubtargetMap[SubtargetKey];
204 if (!I) {
205 // This needs to be done before we create a new subtarget since any
206 // creation will depend on the TM and the code generation flags on the
207 // function that reside in TargetOptions.
208 resetTargetOptions(F);
209 I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
210 }
211
212 return I.get();
213}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000214
215//===----------------------------------------------------------------------===//
216// GCN Target Machine (SI+)
217//===----------------------------------------------------------------------===//
218
#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
// GlobalISel accessor that owns the SI call lowering implementation; only
// built when GlobalISel is compiled in.
struct SIGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
  const AMDGPUCallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
};
} // End anonymous namespace.
#endif
229
Tom Stellard45bb48e2015-06-13 03:28:10 +0000230GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
Tom Stellard5dde1d22016-02-05 18:29:17 +0000231 StringRef CPU, StringRef FS,
Rafael Espindola8c34dd82016-05-18 22:04:49 +0000232 TargetOptions Options,
233 Optional<Reloc::Model> RM,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000234 CodeModel::Model CM, CodeGenOpt::Level OL)
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000235 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
236
237const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
238 StringRef GPU = getGPUName(F);
239 StringRef FS = getFeatureString(F);
240
241 SmallString<128> SubtargetKey(GPU);
242 SubtargetKey.append(FS);
243
244 auto &I = SubtargetMap[SubtargetKey];
245 if (!I) {
246 // This needs to be done before we create a new subtarget since any
247 // creation will depend on the TM and the code generation flags on the
248 // function that reside in TargetOptions.
249 resetTargetOptions(F);
250 I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
251
252#ifndef LLVM_BUILD_GLOBAL_ISEL
253 GISelAccessor *GISel = new GISelAccessor();
254#else
255 SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
Matt Arsenaulteb9025d2016-06-28 17:42:09 +0000256 GISel->CallLoweringInfo.reset(
257 new AMDGPUCallLowering(*I->getTargetLowering()));
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000258#endif
259
260 I->setGISelAccessor(*GISel);
261 }
262
263 return I.get();
264}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000265
266//===----------------------------------------------------------------------===//
267// AMDGPU Pass Setup
268//===----------------------------------------------------------------------===//
269
270namespace {
Tom Stellardcc7067a62016-03-03 03:53:29 +0000271
Tom Stellard45bb48e2015-06-13 03:28:10 +0000272class AMDGPUPassConfig : public TargetPassConfig {
273public:
274 AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
Matt Arsenault0a109002015-09-25 17:41:20 +0000275 : TargetPassConfig(TM, PM) {
276
277 // Exceptions and StackMaps are not supported, so these passes will never do
278 // anything.
279 disablePass(&StackMapLivenessID);
280 disablePass(&FuncletLayoutID);
281 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000282
283 AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
284 return getTM<AMDGPUTargetMachine>();
285 }
286
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000287 void addEarlyCSEOrGVNPass();
288 void addStraightLineScalarOptimizationPasses();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000289 void addIRPasses() override;
Matt Arsenault908b9e22016-07-01 03:33:52 +0000290 void addCodeGenPrepare() override;
Matt Arsenault0a109002015-09-25 17:41:20 +0000291 bool addPreISel() override;
292 bool addInstSelector() override;
293 bool addGCPasses() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000294};
295
Matt Arsenault6b6a2c32016-03-11 08:00:27 +0000296class R600PassConfig final : public AMDGPUPassConfig {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000297public:
298 R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
299 : AMDGPUPassConfig(TM, PM) { }
300
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000301 ScheduleDAGInstrs *createMachineScheduler(
302 MachineSchedContext *C) const override {
303 return createR600MachineScheduler(C);
304 }
305
Tom Stellard45bb48e2015-06-13 03:28:10 +0000306 bool addPreISel() override;
307 void addPreRegAlloc() override;
308 void addPreSched2() override;
309 void addPreEmitPass() override;
310};
311
Matt Arsenault6b6a2c32016-03-11 08:00:27 +0000312class GCNPassConfig final : public AMDGPUPassConfig {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000313public:
314 GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
315 : AMDGPUPassConfig(TM, PM) { }
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000316
317 GCNTargetMachine &getGCNTargetMachine() const {
318 return getTM<GCNTargetMachine>();
319 }
320
321 ScheduleDAGInstrs *
Matt Arsenault03d85842016-06-27 20:32:13 +0000322 createMachineScheduler(MachineSchedContext *C) const override;
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000323
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +0000324 void addIRPasses() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000325 bool addPreISel() override;
Matt Arsenault3d1c1de2016-04-14 21:58:24 +0000326 void addMachineSSAOptimization() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000327 bool addInstSelector() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000328#ifdef LLVM_BUILD_GLOBAL_ISEL
329 bool addIRTranslator() override;
Tim Northover33b07d62016-07-22 20:03:43 +0000330 bool addLegalizeMachineIR() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000331 bool addRegBankSelect() override;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000332 bool addGlobalInstructionSelect() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000333#endif
Matt Arsenaultb87fc222015-10-01 22:10:03 +0000334 void addFastRegAlloc(FunctionPass *RegAllocPass) override;
335 void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000336 void addPreRegAlloc() override;
Matt Arsenaulte6740752016-09-29 01:44:16 +0000337 void addPostRegAlloc() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000338 void addPreSched2() override;
339 void addPreEmitPass() override;
340};
341
342} // End of anonymous namespace
343
344TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
Eric Christophera4e5d3c2015-09-16 23:38:13 +0000345 return TargetIRAnalysis([this](const Function &F) {
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000346 return TargetTransformInfo(AMDGPUTTIImpl(this, F));
Mehdi Amini5010ebf2015-07-09 02:08:42 +0000347 });
Tom Stellard45bb48e2015-06-13 03:28:10 +0000348}
349
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000350void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
351 if (getOptLevel() == CodeGenOpt::Aggressive)
352 addPass(createGVNPass());
353 else
354 addPass(createEarlyCSEPass());
355}
356
357void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
358 addPass(createSeparateConstOffsetFromGEPPass());
359 addPass(createSpeculativeExecutionPass());
360 // ReassociateGEPs exposes more opportunites for SLSR. See
361 // the example in reassociate-geps-and-slsr.ll.
362 addPass(createStraightLineStrengthReducePass());
363 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
364 // EarlyCSE can reuse.
365 addEarlyCSEOrGVNPass();
366 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
367 addPass(createNaryReassociatePass());
368 // NaryReassociate on GEPs creates redundant common expressions, so run
369 // EarlyCSE after it.
370 addPass(createEarlyCSEPass());
371}
372
Tom Stellard45bb48e2015-06-13 03:28:10 +0000373void AMDGPUPassConfig::addIRPasses() {
Matt Arsenaultbde80342016-05-18 15:41:07 +0000374 // There is no reason to run these.
375 disablePass(&StackMapLivenessID);
376 disablePass(&FuncletLayoutID);
377 disablePass(&PatchableFunctionID);
378
Tom Stellard45bb48e2015-06-13 03:28:10 +0000379 // Function calls are not supported, so make sure we inline everything.
380 addPass(createAMDGPUAlwaysInlinePass());
Chandler Carruth67fc52f2016-08-17 02:56:20 +0000381 addPass(createAlwaysInlinerLegacyPass());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000382 // We need to add the barrier noop pass, otherwise adding the function
383 // inlining pass will cause all of the PassConfigs passes to be run
384 // one function at a time, which means if we have a nodule with two
385 // functions, then we will generate code for the first function
386 // without ever running any passes on the second.
387 addPass(createBarrierNoopPass());
Matt Arsenault39319482015-11-06 18:01:57 +0000388
Tom Stellardfd253952015-08-07 23:19:30 +0000389 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
390 addPass(createAMDGPUOpenCLImageTypeLoweringPass());
Matt Arsenault39319482015-11-06 18:01:57 +0000391
Matt Arsenaulte0132462016-01-30 05:19:45 +0000392 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
Matt Arsenault03d85842016-06-27 20:32:13 +0000393 if (TM.getOptLevel() > CodeGenOpt::None) {
Matt Arsenaulte0132462016-01-30 05:19:45 +0000394 addPass(createAMDGPUPromoteAlloca(&TM));
Matt Arsenault03d85842016-06-27 20:32:13 +0000395
396 if (EnableSROA)
397 addPass(createSROAPass());
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000398
Konstantin Zhuravlyov4658e5f2016-09-30 16:39:24 +0000399 addStraightLineScalarOptimizationPasses();
400 }
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000401
402 TargetPassConfig::addIRPasses();
403
404 // EarlyCSE is not always strong enough to clean up what LSR produces. For
405 // example, GVN can combine
406 //
407 // %0 = add %a, %b
408 // %1 = add %b, %a
409 //
410 // and
411 //
412 // %0 = shl nsw %a, 2
413 // %1 = shl %a, 2
414 //
415 // but EarlyCSE can do neither of them.
416 if (getOptLevel() != CodeGenOpt::None)
417 addEarlyCSEOrGVNPass();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000418}
419
Matt Arsenault908b9e22016-07-01 03:33:52 +0000420void AMDGPUPassConfig::addCodeGenPrepare() {
421 TargetPassConfig::addCodeGenPrepare();
422
423 if (EnableLoadStoreVectorizer)
424 addPass(createLoadStoreVectorizerPass());
425}
426
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000427bool AMDGPUPassConfig::addPreISel() {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000428 addPass(createFlattenCFGPass());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000429 return false;
430}
431
432bool AMDGPUPassConfig::addInstSelector() {
Konstantin Zhuravlyov60a83732016-10-03 18:47:26 +0000433 addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000434 return false;
435}
436
Matt Arsenault0a109002015-09-25 17:41:20 +0000437bool AMDGPUPassConfig::addGCPasses() {
438 // Do nothing. GC is not supported.
439 return false;
440}
441
Tom Stellard45bb48e2015-06-13 03:28:10 +0000442//===----------------------------------------------------------------------===//
443// R600 Pass Setup
444//===----------------------------------------------------------------------===//
445
446bool R600PassConfig::addPreISel() {
447 AMDGPUPassConfig::addPreISel();
Matt Arsenaultc5816112016-06-24 06:30:22 +0000448
449 if (EnableR600StructurizeCFG)
Tom Stellardbc4497b2016-02-12 23:45:29 +0000450 addPass(createStructurizeCFGPass());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000451 return false;
452}
453
454void R600PassConfig::addPreRegAlloc() {
455 addPass(createR600VectorRegMerger(*TM));
456}
457
458void R600PassConfig::addPreSched2() {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000459 addPass(createR600EmitClauseMarkers(), false);
Matt Arsenault03d85842016-06-27 20:32:13 +0000460 if (EnableR600IfConvert)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000461 addPass(&IfConverterID, false);
462 addPass(createR600ClauseMergePass(*TM), false);
463}
464
465void R600PassConfig::addPreEmitPass() {
466 addPass(createAMDGPUCFGStructurizerPass(), false);
467 addPass(createR600ExpandSpecialInstrsPass(*TM), false);
468 addPass(&FinalizeMachineBundlesID, false);
469 addPass(createR600Packetizer(*TM), false);
470 addPass(createR600ControlFlowFinalizer(*TM), false);
471}
472
473TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
474 return new R600PassConfig(this, PM);
475}
476
477//===----------------------------------------------------------------------===//
478// GCN Pass Setup
479//===----------------------------------------------------------------------===//
480
Matt Arsenault03d85842016-06-27 20:32:13 +0000481ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
482 MachineSchedContext *C) const {
483 const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
484 if (ST.enableSIScheduler())
485 return createSIMachineScheduler(C);
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000486 return createGCNMaxOccupancyMachineScheduler(C);
Matt Arsenault03d85842016-06-27 20:32:13 +0000487}
488
Tom Stellard45bb48e2015-06-13 03:28:10 +0000489bool GCNPassConfig::addPreISel() {
490 AMDGPUPassConfig::addPreISel();
Matt Arsenault39319482015-11-06 18:01:57 +0000491
492 // FIXME: We need to run a pass to propagate the attributes when calls are
493 // supported.
494 addPass(&AMDGPUAnnotateKernelFeaturesID);
Tom Stellardbc4497b2016-02-12 23:45:29 +0000495 addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
Tom Stellard45bb48e2015-06-13 03:28:10 +0000496 addPass(createSinkingPass());
497 addPass(createSITypeRewriter());
Tom Stellarda6f24c62015-12-15 20:55:55 +0000498 addPass(createAMDGPUAnnotateUniformValues());
Tom Stellardbc4497b2016-02-12 23:45:29 +0000499 addPass(createSIAnnotateControlFlowPass());
Tom Stellarda6f24c62015-12-15 20:55:55 +0000500
Tom Stellard45bb48e2015-06-13 03:28:10 +0000501 return false;
502}
503
Matt Arsenault3d1c1de2016-04-14 21:58:24 +0000504void GCNPassConfig::addMachineSSAOptimization() {
505 TargetPassConfig::addMachineSSAOptimization();
506
507 // We want to fold operands after PeepholeOptimizer has run (or as part of
508 // it), because it will eliminate extra copies making it easier to fold the
509 // real source operand. We want to eliminate dead instructions after, so that
510 // we see fewer uses of the copies. We then need to clean up the dead
511 // instructions leftover after the operands are folded as well.
512 //
513 // XXX - Can we get away without running DeadMachineInstructionElim again?
514 addPass(&SIFoldOperandsID);
515 addPass(&DeadMachineInstructionElimID);
Tom Stellardc2ff0eb2016-08-29 19:15:22 +0000516 addPass(&SILoadStoreOptimizerID);
Matt Arsenault3d1c1de2016-04-14 21:58:24 +0000517}
518
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +0000519void GCNPassConfig::addIRPasses() {
520 // TODO: May want to move later or split into an early and late one.
521 addPass(createAMDGPUCodeGenPreparePass(&getGCNTargetMachine()));
522
523 AMDGPUPassConfig::addIRPasses();
524}
525
Tom Stellard45bb48e2015-06-13 03:28:10 +0000526bool GCNPassConfig::addInstSelector() {
527 AMDGPUPassConfig::addInstSelector();
528 addPass(createSILowerI1CopiesPass());
Matt Arsenault782c03b2015-11-03 22:30:13 +0000529 addPass(&SIFixSGPRCopiesID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000530 return false;
531}
532
#ifdef LLVM_BUILD_GLOBAL_ISEL
// GlobalISel pipeline hooks. Only the IR translator is wired up; the
// remaining stages are stubs that add no passes yet.
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  return false;
}
#endif
551
Tom Stellard45bb48e2015-06-13 03:28:10 +0000552void GCNPassConfig::addPreRegAlloc() {
Matt Arsenault4a07bf62016-06-22 20:26:24 +0000553 addPass(createSIShrinkInstructionsPass());
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000554 addPass(createSIWholeQuadModePass());
Matt Arsenaultb87fc222015-10-01 22:10:03 +0000555}
556
557void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000558 // FIXME: We have to disable the verifier here because of PHIElimination +
559 // TwoAddressInstructions disabling it.
Matt Arsenaulte6740752016-09-29 01:44:16 +0000560
561 // This must be run immediately after phi elimination and before
562 // TwoAddressInstructions, otherwise the processing of the tied operand of
563 // SI_ELSE will introduce a copy of the tied operand source after the else.
564 insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000565
Matt Arsenaultb87fc222015-10-01 22:10:03 +0000566 TargetPassConfig::addFastRegAlloc(RegAllocPass);
567}
568
569void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000570 // This needs to be run directly before register allocation because earlier
571 // passes might recompute live intervals.
572 insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
573
Matt Arsenaulte6740752016-09-29 01:44:16 +0000574 // This must be run immediately after phi elimination and before
575 // TwoAddressInstructions, otherwise the processing of the tied operand of
576 // SI_ELSE will introduce a copy of the tied operand source after the else.
577 insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000578
Matt Arsenaultb87fc222015-10-01 22:10:03 +0000579 TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000580}
581
Matt Arsenaulte6740752016-09-29 01:44:16 +0000582void GCNPassConfig::addPostRegAlloc() {
583 addPass(&SIOptimizeExecMaskingID);
584 TargetPassConfig::addPostRegAlloc();
585}
586
Tom Stellard45bb48e2015-06-13 03:28:10 +0000587void GCNPassConfig::addPreSched2() {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000588}
589
590void GCNPassConfig::addPreEmitPass() {
Tom Stellardcb6ba622016-04-30 00:23:06 +0000591 // The hazard recognizer that runs as part of the post-ra scheduler does not
Matt Arsenault254a6452016-06-28 16:59:53 +0000592 // guarantee to be able handle all hazards correctly. This is because if there
593 // are multiple scheduling regions in a basic block, the regions are scheduled
594 // bottom up, so when we begin to schedule a region we don't know what
595 // instructions were emitted directly before it.
Tom Stellardcb6ba622016-04-30 00:23:06 +0000596 //
Matt Arsenault254a6452016-06-28 16:59:53 +0000597 // Here we add a stand-alone hazard recognizer pass which can handle all
598 // cases.
Tom Stellardcb6ba622016-04-30 00:23:06 +0000599 addPass(&PostRAHazardRecognizerID);
600
Matt Arsenaulte2bd9a32016-06-09 23:19:14 +0000601 addPass(createSIInsertWaitsPass());
Matt Arsenaultcf2744f2016-04-29 20:23:42 +0000602 addPass(createSIShrinkInstructionsPass());
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000603 addPass(&SIInsertSkipsPassID);
Matt Arsenault9babdf42016-06-22 20:15:28 +0000604 addPass(createSIDebuggerInsertNopsPass());
Matt Arsenault6bc43d82016-10-06 16:20:41 +0000605 addPass(&BranchRelaxationPassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000606}
607
608TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
609 return new GCNPassConfig(this, PM);
610}