//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));
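
// These flags are ordinary cl::opt switches, so they can be toggled on any
// tool that links in this target. A sketch of a typical invocation (the flag
// names are the ones defined in this file; the rest of the command line is
// illustrative):
//   llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 input.ll -o input.s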

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable the vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global load scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib call simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// This option is used in lit tests to prevent dead-coding of inspected patterns.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
  cl::init(true), cl::Hidden,
  cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}
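
// A descriptive aside on the mutations used above and in the schedulers
// below: the load/store cluster mutations bias the scheduler toward keeping
// adjacent memory accesses together so they can be merged or issued
// back-to-back, and the AMDGPU macro-fusion mutation keeps fusible
// instruction pairs adjacent.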

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
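
// Each registry entry above makes its scheduler selectable by name through
// the generic machine scheduler option; a sketch of how one would be picked
// (illustrative command line):
//   llc -march=amdgcn -misched=gcn-ilp input.ll -o -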

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}
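
// For reference, this uses the standard LLVM data layout syntax: "e" is
// little-endian; "pN:S:A" gives the pointer size/ABI alignment for address
// space N; "i64:64" and "vW:A" set integer/vector ABI alignments; "n32:64"
// lists the native integer widths; "S32" is the natural stack alignment in
// bits; "A5" places allocas in address space 5 (private); and "ni:7" marks
// pointers in address space 7 (buffer fat pointers) as non-integral.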

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}
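
// Both hooks prefer per-function attributes over the TargetMachine-wide
// defaults; a sketch of IR that would exercise them (the CPU and feature
// names are illustrative):
//   define void @f() #0 { ret void }
//   attributes #0 = { "target-cpu"="gfx900" "target-features"="+fp64" }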

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}
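
// In effect, declarations and entry points (kernels) survive internalization
// while defined device functions and unused globals are made internal, which
// lets the GlobalDCE pass added below remove them if they are dead. For
// example (illustrative IR):
//   define amdgpu_kernel void @k() { ret void }  ; preserved
//   define void @helper() { ret void }           ; internalized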

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}
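
// For orientation, a rough sketch of where the legacy PassManagerBuilder
// extension points used above fire (exact placement varies with the
// optimization level):
//   EP_EarlyAsPossible      - before any canonicalization passes
//   EP_ModuleOptimizerEarly - at the start of the module optimizer
//   EP_CGSCCOptimizerLate   - near the end of the CGSCC/inliner cycle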

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}
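
// A quick way to see where the passes added here land in the final pipeline
// (illustrative command; -debug-pass=Structure is the legacy pass manager's
// pipeline dump):
//   llc -march=amdgcn -debug-pass=Structure input.ll -o /dev/null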

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}
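
// A note on the insertPass calls above: insertPass(A, B) splices pass B in
// immediately after pass A wherever A appears in the pipeline, which is how
// these SI passes are threaded into the generic register allocation
// sequence.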

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if there
  // are multiple scheduling regions in a basic block, the regions are scheduled
  // bottom up, so when we begin to schedule a region we don't know what
  // instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
  // be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
    reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
      *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SReg_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  return false;
}
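
// A sketch of the MIR YAML block these hooks round-trip (field names follow
// yaml::SIMachineFunctionInfo; the concrete register choices below are
// illustrative):
//   machineFunctionInfo:
//     scratchRSrcReg:       '$sgpr0_sgpr1_sgpr2_sgpr3'
//     scratchWaveOffsetReg: '$sgpr4'
//     frameOffsetReg:       '$sgpr5'
//     stackPtrOffsetReg:    '$sgpr32'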