//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
  cl::init(true), cl::Hidden,
  cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);
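
// Usage sketch (assuming the standard llc driver): the knobs above are plain
// cl::opt flags, so they can be toggled on the command line, e.g.
//   llc -mtriple=amdgcn-- -amdgpu-sroa=0 -amdgpu-sdwa-peephole=0 input.ll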

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
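
// The registries above plug into MachineScheduler's strategy selector; a usage
// sketch (assuming the standard llc driver):
//   llc -mtriple=amdgcn-- -misched=gcn-ilp input.ll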

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}
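
// These accessors read per-function IR attributes; a minimal IR sketch of what
// they match (the attribute spellings are standard, the values are examples):
//   define amdgpu_kernel void @k() #0 { ... }
//   attributes #0 = { "target-cpu"="gfx900" "target-features"="+wavefrontsize64" }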

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}
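
// Sketch of the effect (assuming -amdgpu-internalize-symbols is set): entry
// points such as amdgpu_kernel functions and declarations are preserved, while
// any other function or unused global is internalized and may then be removed
// by the GlobalDCE pass added below.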

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &,
                                            legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}
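
// A minimal sketch of how a frontend drives this hook (legacy pass manager
// assumed; M is some already-parsed Module):
//   PassManagerBuilder Builder;
//   TM->adjustPassManager(Builder);
//   legacy::PassManager PM;
//   Builder.populateModulePassManager(PM);
//   PM.run(M);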

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // A call to propagate attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
  // be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };
1033
  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }
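
  // For example, a hypothetical MIR input of "scratchRSrcReg: '$sgpr0'" would
  // parse as a valid register above but fail the SGPR_128 containment check,
  // and so be reported through diagnoseRegisterClass().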

  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      unsigned Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };
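
  // A sketch of the argument entries this lambda consumes, with field
  // spellings assumed from yaml::SIArgument (illustrative only):
  //
  //   argumentInfo:
  //     privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
  //     kernargSegmentPtr:    { reg: '$sgpr4_sgpr5' }
  //     workItemIDY:          { reg: '$vgpr31', mask: 1047552 }
  //
  // A register entry must land in the expected register class; a stack entry
  // carries an offset instead of a register name, and the optional mask picks
  // out the bits of a packed register that belong to this argument.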

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32Denormals = YamlMFI.Mode.FP32Denormals;
  MFI->Mode.FP64FP16Denormals = YamlMFI.Mode.FP64FP16Denormals;
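
  // E.g. the corresponding MIR block might read (key spellings assumed from
  // the SIMode YAML mapping; illustrative only):
  //
  //   mode:
  //     ieee: true
  //     dx10-clamp: true
  //     fp32-denormals: false
  //     fp64-fp16-denormals: true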

  return false;
}