//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#ifdef LLVM_BUILD_GLOBAL_ISEL
#include "AMDGPURegisterBankInfo.h"
#endif
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global load scalarization.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(false),
  cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Enable address space based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

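// Entry point called by the target registry: registers both target machines
// (R600 and GCN) and initializes the backend-specific machine passes with the
// PassRegistry.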
extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

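// Default scheduler for GCN: a GCNScheduleDAGMILive driven by the
// max-occupancy strategy, plus DAG mutations that cluster neighbouring loads
// and stores.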
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

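// Hook AMDGPU-specific IR passes into the standard PassManagerBuilder
// pipeline: mark the target as divergent, unify metadata, optionally
// internalize everything except kernels and entry points (followed by
// GlobalDCE), and add the AMDGPU always-inline pass.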
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool Internalize = InternalizeSymbols &&
                     (getOptLevel() > CodeGenOpt::None) &&
                     (getTargetTriple().getArch() == Triple::amdgcn);
  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass([=](const GlobalValue &GV) -> bool {
          if (const Function *F = dyn_cast<Function>(&GV)) {
            if (F->isDeclaration())
              return true;
            switch (F->getCallingConv()) {
            default:
              return false;
            case CallingConv::AMDGPU_VS:
            case CallingConv::AMDGPU_GS:
            case CallingConv::AMDGPU_PS:
            case CallingConv::AMDGPU_CS:
            case CallingConv::AMDGPU_KERNEL:
            case CallingConv::SPIR_KERNEL:
              return true;
            }
          }
          return !GV.use_empty();
        }));
        PM.add(createGlobalDCEPass());
      }
      PM.add(createAMDGPUAlwaysInlinePass());
    });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {

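// Holds the GlobalISel components (call lowering, instruction selector,
// legalizer, and register bank info) when LLVM_BUILD_GLOBAL_ISEL is enabled;
// the subtarget hands them out through the GISelAccessor interface.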
struct SIGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const AMDGPUCallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }
  const LegalizerInfo *getLegalizerInfo() const override {
    return Legalizer.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};

} // end anonymous namespace
#endif

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);

#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
    GISel->CallLoweringInfo.reset(
      new AMDGPUCallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new AMDGPULegalizerInfo());

    GISel->RegBankInfo.reset(new AMDGPURegisterBankInfo(*I->getRegisterInfo()));
    GISel->InstSelector.reset(new AMDGPUInstructionSelector(*I,
      *static_cast<AMDGPURegisterBankInfo*>(GISel->RegBankInfo.get())));
#endif

    I->setGISelAccessor(*GISel);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

372public:
373 AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
Matt Arsenault0a109002015-09-25 17:41:20 +0000374 : TargetPassConfig(TM, PM) {
Matt Arsenault0a109002015-09-25 17:41:20 +0000375 // Exceptions and StackMaps are not supported, so these passes will never do
376 // anything.
377 disablePass(&StackMapLivenessID);
378 disablePass(&FuncletLayoutID);
379 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000380
381 AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
382 return getTM<AMDGPUTargetMachine>();
383 }
384
Matthias Braun115efcd2016-11-28 20:11:54 +0000385 ScheduleDAGInstrs *
386 createMachineScheduler(MachineSchedContext *C) const override {
387 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
388 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
389 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
390 return DAG;
391 }
392
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000393 void addEarlyCSEOrGVNPass();
394 void addStraightLineScalarOptimizationPasses();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000395 void addIRPasses() override;
Matt Arsenault908b9e22016-07-01 03:33:52 +0000396 void addCodeGenPrepare() override;
Matt Arsenault0a109002015-09-25 17:41:20 +0000397 bool addPreISel() override;
398 bool addInstSelector() override;
399 bool addGCPasses() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000400};
401
Matt Arsenault6b6a2c32016-03-11 08:00:27 +0000402class R600PassConfig final : public AMDGPUPassConfig {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000403public:
404 R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
Eugene Zelenko6a9226d2016-12-12 22:23:53 +0000405 : AMDGPUPassConfig(TM, PM) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000406
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000407 ScheduleDAGInstrs *createMachineScheduler(
408 MachineSchedContext *C) const override {
409 return createR600MachineScheduler(C);
410 }
411
Tom Stellard45bb48e2015-06-13 03:28:10 +0000412 bool addPreISel() override;
413 void addPreRegAlloc() override;
414 void addPreSched2() override;
415 void addPreEmitPass() override;
416};
417
Matt Arsenault6b6a2c32016-03-11 08:00:27 +0000418class GCNPassConfig final : public AMDGPUPassConfig {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000419public:
420 GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
Eugene Zelenko6a9226d2016-12-12 22:23:53 +0000421 : AMDGPUPassConfig(TM, PM) {}
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000422
423 GCNTargetMachine &getGCNTargetMachine() const {
424 return getTM<GCNTargetMachine>();
425 }
426
427 ScheduleDAGInstrs *
Matt Arsenault03d85842016-06-27 20:32:13 +0000428 createMachineScheduler(MachineSchedContext *C) const override;
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000429
Tom Stellard45bb48e2015-06-13 03:28:10 +0000430 bool addPreISel() override;
Matt Arsenault3d1c1de2016-04-14 21:58:24 +0000431 void addMachineSSAOptimization() override;
Matt Arsenault9f5e0ef2017-01-25 04:25:02 +0000432 bool addILPOpts() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000433 bool addInstSelector() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000434#ifdef LLVM_BUILD_GLOBAL_ISEL
435 bool addIRTranslator() override;
Tim Northover33b07d62016-07-22 20:03:43 +0000436 bool addLegalizeMachineIR() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000437 bool addRegBankSelect() override;
Ahmed Bougacha6756a2c2016-07-27 14:31:55 +0000438 bool addGlobalInstructionSelect() override;
Tom Stellard000c5af2016-04-14 19:09:28 +0000439#endif
Matt Arsenaultb87fc222015-10-01 22:10:03 +0000440 void addFastRegAlloc(FunctionPass *RegAllocPass) override;
441 void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000442 void addPreRegAlloc() override;
Matt Arsenaulte6740752016-09-29 01:44:16 +0000443 void addPostRegAlloc() override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000444 void addPreSched2() override;
445 void addPreEmitPass() override;
446};
447
Eugene Zelenko6a9226d2016-12-12 22:23:53 +0000448} // end anonymous namespace
Tom Stellard45bb48e2015-06-13 03:28:10 +0000449
450TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
Eric Christophera4e5d3c2015-09-16 23:38:13 +0000451 return TargetIRAnalysis([this](const Function &F) {
Matt Arsenault59c0ffa2016-06-27 20:48:03 +0000452 return TargetTransformInfo(AMDGPUTTIImpl(this, F));
Mehdi Amini5010ebf2015-07-09 02:08:42 +0000453 });
Tom Stellard45bb48e2015-06-13 03:28:10 +0000454}
455
Matt Arsenaultf42c6922016-06-15 00:11:01 +0000456void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
457 if (getOptLevel() == CodeGenOpt::Aggressive)
458 addPass(createGVNPass());
459 else
460 addPass(createEarlyCSEPass());
461}
462
void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass(
      static_cast<const GCNTargetMachine *>(&TM)));
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca(&TM));

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  addPass(createAMDGPUAnnotateKernelFeaturesPass(&TM));
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

#endif

void GCNPassConfig::addPreRegAlloc() {
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // This needs to be run directly before register allocation because earlier
  // passes might recompute live intervals.
  insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(createSIInsertWaitsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(&SIInsertSkipsPassID);
  addPass(createSIDebuggerInsertNopsPass());
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}