//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
  cl::init(true), cl::Hidden,
  cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

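// Build the GCN scheduler that maximizes occupancy, with load/store clustering
// and macro-fusion DAG mutations layered on top.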
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

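// Data layout strings: R600 uses 32-bit pointers throughout, while GCN uses
// 64-bit global/flat pointers with 32-bit private pointers (alloca address
// space 5, the -A5 field).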
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

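// Per-function subtarget selection: prefer the function's "target-cpu" and
// "target-features" attributes, falling back to the TargetMachine defaults.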
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

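// Hook the AMDGPU-specific IR passes into the middle-end pipeline through
// PassManagerBuilder extension points (module-optimizer-early,
// early-as-possible, and CGSCC-optimizer-late).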
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &,
                                            legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

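// Use the standard GlobalISel CSE configuration for the current optimization
// level.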
std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // Run the propagate-attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if there
  // are multiple scheduling regions in a basic block, the regions are scheduled
  // bottom up, so when we begin to schedule a region we don't know what
  // instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
  // be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

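// MIR (YAML) serialization hooks for SIMachineFunctionInfo: default-construct,
// convert from a MachineFunction, and parse back into the function info.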
yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

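  // Parse a named register reference from the YAML string into RegVal,
  // recording the offending source range on failure.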
  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SReg_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      unsigned Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value,
                                      Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SReg_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.DispatchPtr) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.DispatchID) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupIDX) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupIDY) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupIDZ) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  return false;
}