//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

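// Command-line options that override pieces of the default pass pipeline.
// Most are hidden; they exist primarily for tests and debugging.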
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
  cl::init(true), cl::Hidden,
  cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}

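// All AMDGPU targets share a single object-file lowering implementation.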
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

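// Register the alternative schedulers so they can be selected from the
// command line via -misched=<name>.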
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &,
                                            legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

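// GVN catches more redundancies than EarlyCSE but is also more expensive, so
// it is only run at the most aggressive optimization level.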
void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // A call to propagate attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

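// GlobalISel pipeline hooks: IR translation, legalization, register bank
// selection, and final instruction selection.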
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
}

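// Passes added in addPreEmitPass run immediately before machine code emission.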
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
  // be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
    *MF.getSubtarget().getRegisterInfo());
}

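// Reconstruct the SIMachineFunctionInfo from MIR YAML, validating that the
// scratch, frame, and stack registers have the expected register classes.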
bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

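  // The four special registers default to reserved placeholder values
  // (PRIVATE_RSRC_REG, SCRATCH_WAVE_OFFSET_REG, FP_REG, SP_REG) when the
  // YAML leaves them unset, so each check below enforces the register class
  // only when a real physical register was named.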
  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      unsigned Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

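  // Each argument is checked against the register class its calling
  // convention slot expects, and the trailing operands keep the SGPR
  // bookkeeping consistent with lowering: the 128-bit private segment buffer
  // accounts for 4 user SGPRs, each 64-bit pointer for 2, and the system
  // SGPRs (workgroup IDs and friends) for 1 apiece. An illustrative
  // argumentInfo entry using the optional mask, as the author recalls the
  // syntax:
  //   workItemIDY: { reg: '$vgpr31', mask: 1047552 }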
  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

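  // Finally, copy over the floating-point mode fields; in YAML these appear
  // as something like "mode: { ieee: true, dx10-clamp: true }" (illustrative;
  // see the yaml::SIMode mapping for the exact keys).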
  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  return false;
}