//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPU.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

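  // Register the target's machine passes with the global PassRegistry so
  // they can be referenced by name when building the pass pipelines below.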
  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIFixControlFlowLiveIntervalsPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertNopsPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
}

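// Both the R600 and GCN target machines share the same
// TargetLoweringObjectFile implementation; the triple argument is unused.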
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

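// Construct the DataLayout string for the target. Pointers default to 32
// bits; for amdgcn several address spaces are widened to 64-bit pointers
// below.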
static std::string computeDataLayout(const Triple &TT) {
  std::string Ret = "e-p:32:32";

  if (TT.getArch() == Triple::amdgcn) {
    // 32-bit private, local, and region pointers. 64-bit global and constant.
    Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
  }

  Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
         "-v512:512-v1024:1024-v2048:2048-n32:64";

  return Ret;
}

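// Return the GPU to target. If no CPU was given, default to kaveri (a CI
// part) for AMDHSA and to tahiti (the first SI part) for other amdgcn
// triples; R600 keeps an empty default.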
LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // HSA only supports CI+, so change the default GPU to a CI for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";

  return "";
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT,
                        getGPUOrDefault(TT, CPU), FS, Options, RM, CM,
                        OptLevel),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, getTargetCPU(), FS, *this),
      IntrinsicInfo() {
  setRequiresStructuredCFG(true);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() { }

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options, Reloc::Model RM,
                                     CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options, Reloc::Model RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {

    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

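  // Use R600's custom scheduler for pre-GCN subtargets and the SI machine
  // scheduler when the subtarget enables it; otherwise fall back to the
  // default machine scheduler.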
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    else if (ST.enableSIScheduler())
      return createSIMachineScheduler(C);
    return nullptr;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }

  bool addPreISel() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) { }
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // End of anonymous namespace

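// Expose the AMDGPU TargetTransformInfo implementation so that IR-level
// passes can query target-specific cost information.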
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(
        AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
  });
}

void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  TargetPassConfig::addIRPasses();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
  if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(&TM));
    addPass(createSROAPass());
  }
  TargetPassConfig::addCodeGenPrepare();
}

bool
AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  addPass(createR600TextureIntrinsicsReplacer());
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger(*TM));
}

void R600PassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
  addPass(createR600EmitClauseMarkers(), false);
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(*TM), false);
}

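// Final R600 lowering: structurize the CFG, expand pseudo instructions,
// finalize bundles, packetize for the VLIW pipeline, and emit the final
// clause/control-flow instructions.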
void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(*TM), false);
  addPass(createR600ControlFlowFinalizer(*TM), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(&AMDGPUAnnotateKernelFeaturesID);
  addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  addPass(createSinkingPass());
  addPass(createSITypeRewriter());
  addPass(createAMDGPUAnnotateUniformValues());
  addPass(createSIAnnotateControlFlowPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
}

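// Run target-specific fixups immediately after instruction selection: lower
// i1 copies into a form SI can execute and repair illegal VGPR-to-SGPR
// copies.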
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}

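// GlobalISel hooks, compiled in only when LLVM is built with
// LLVM_BUILD_GLOBAL_ISEL. Register bank selection is not implemented for
// AMDGPU yet, so addRegBankSelect() adds no passes.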
#ifdef LLVM_BUILD_GLOBAL_ISEL
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  return false;
}
#endif

void GCNPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();

  // This needs to be run directly before register allocation because
  // earlier passes might recompute live intervals.
  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass.
  if (getOptLevel() > CodeGenOpt::None) {
    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
  }

  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
    // Don't do this with no optimizations since it throws away debug info by
    // merging nonadjacent loads.

    // This should be run after scheduling, but before register allocation. It
    // also requires extra copies to the address operand to be eliminated first.
    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    insertPass(&MachineSchedulerID, &RegisterCoalescerID);
  }
  addPass(createSIShrinkInstructionsPass(), false);
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {

  // The hazard recognizer that runs as part of the post-RA scheduler does not
  // guarantee that it can handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

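  // Remaining SI-specific lowering: insert s_waitcnt instructions, shrink
  // instructions to their 32-bit encodings where possible, expand control
  // flow pseudo instructions, and insert NOPs where needed.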
  addPass(createSIInsertWaitsPass(), false);
  addPass(createSIShrinkInstructionsPass());
  addPass(createSILowerControlFlowPass(), false);
  addPass(createSIInsertNopsPass(), false);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(this, PM);
}