//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

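// The cl::opt flags below are ordinary command-line toggles registered with
// LLVM's CommandLine library, so any cl-aware tool can flip them; for
// example (an illustrative invocation, not taken from this file):
//   llc -march=r600 -r600-ir-structurize=0 input.ll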
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global load scalarization.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all functions early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

// Enable address space based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run the late CFG structurizer.
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);
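
// Note: the cl::opt<bool, true> + cl::location form used above (and for
// "amdgpu-function-calls" below) stores the parsed value in a static
// AMDGPUTargetMachine member rather than in the option object itself, so
// code that cannot see these option definitions can still read the flag.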

static cl::opt<bool, true> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(false),
  cl::Hidden);

// Enable library call simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization.
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the targets.
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

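// Each MachineSchedRegistry below registers a scheduler factory under a
// name that the generic MachineScheduler machinery can select, typically
// via llc's -misched=<name> option (e.g. -misched=gcn-minreg, assuming the
// standard registry plumbing).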
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
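  // Data-layout notation reminder: "pN:S:A" is the size/ABI alignment of
  // pointers in address space N, "vN:M" gives vector alignments, "n32:64"
  // lists native integer widths, "S32" is the natural stack alignment in
  // bits, and "A5" marks address space 5 as the alloca address space.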
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
  if (CM)
    return *CM;
  return CodeModel::Small;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

static ImmutablePass *createAMDGPUExternalAAWrapperPass() {
  return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
    if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
      AAR.addAAResult(WrapperPass->getResult());
  });
}

/// Predicate for the Internalize pass: returns true for any global value
/// that must be preserved, i.e. kernels (entry functions), declarations,
/// and globals that still have uses.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

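// Hook the AMDGPU-specific IR passes into the middle-end pipeline built by
// PassManagerBuilder (the legacy -O pipeline used by clang and opt); each
// EP_* extension point below fires at a fixed place in that pipeline.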
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add the infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

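  // The GPU name plus feature string keys a per-function subtarget cache, so
  // functions with different "target-cpu"/"target-features" attributes each
  // get a matching subtarget while identical ones share an instance.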
  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
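    // Default scheduler for AMDGPU: the generic live-interval scheduler plus
    // DAG mutations that cluster adjacent loads and stores.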
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph.
    // We allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
  // or EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  return false;
}

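// The next four hooks wire up the GlobalISel pipeline in its standard order:
// IRTranslator (LLVM IR -> generic MIR), Legalizer, RegBankSelect, and
// InstructionSelect (generic MIR -> target instructions).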
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions
  // as needed. It would be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(createSIDebuggerInsertNopsPass());
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000919}