//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

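// The cl::opt flags below are (mostly hidden) llc/opt command-line switches
// that gate individual AMDGPU passes, largely so that lit tests can toggle a
// single feature without rebuilding.
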
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// This option is used in lit tests to prevent dead-coding of the patterns
// under inspection.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

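// For example (hypothetical invocation), the vectorizer can be switched off
// from the command line when bisecting a miscompile:
//   llc -march=amdgcn -amdgpu-load-store-vectorizer=0 < input.ll
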
extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

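  // Registering the codegen passes with the PassRegistry up front lets them
  // be referenced by name later, e.g. by llc's -stop-after/-print-after
  // machinery.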
  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

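// The registries above make these schedulers selectable by name; with llc
// this is typically done via -misched=<name>, e.g. -misched=gcn-ilp.
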
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

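// adjustPassManager hooks the AMDGPU-specific IR passes into a frontend's
// legacy PassManagerBuilder pipeline (e.g. clang's -O pipelines) via the
// extension points registered below.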
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &,
                                            legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

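  // The subtarget cache is keyed on the concatenation of the CPU and feature
  // strings, so functions carrying different "target-cpu"/"target-features"
  // attributes each map to their own subtarget.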
  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never
    // do anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

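// GlobalISel consults this configuration when deciding which instructions its
// continuous CSE may fold; the standard config is derived from the opt level.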
std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // Run the attribute-propagation pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 816 | ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler( |
| 817 | MachineSchedContext *C) const { |
Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 818 | const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>(); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 819 | if (ST.enableSIScheduler()) |
| 820 | return createSIMachineScheduler(C); |
Tom Stellard | 0d23ebe | 2016-08-29 19:42:52 +0000 | [diff] [blame] | 821 | return createGCNMaxOccupancyMachineScheduler(C); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 822 | } |
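// Both schedulers are also exposed through the -misched registry; the
// registrations earlier in this file presumably resemble the following
// (registry names assumed):
//
//   static MachineSchedRegistry
//   SISchedRegistry("si", "Run SI's custom scheduler",
//                   createSIMachineScheduler);
//
//   static MachineSchedRegistry
//   GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
//                                "Run GCN scheduler to maximize occupancy",
//                                createGCNMaxOccupancyMachineScheduler);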
| 823 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 824 | bool GCNPassConfig::addPreISel() { |
| 825 | AMDGPUPassConfig::addPreISel(); |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 826 | |
Neil Henning | 6641657 | 2018-10-08 15:49:19 +0000 | [diff] [blame] | 827 | if (EnableAtomicOptimizations) { |
| 828 | addPass(createAMDGPUAtomicOptimizerPass()); |
| 829 | } |
| 830 | |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 831 | // FIXME: We need to run a pass to propagate the attributes when calls are |
| 832 | // supported. |
Matt Arsenault | b8f8dbc | 2017-03-24 19:52:05 +0000 | [diff] [blame] | 833 | |
| 834 | // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit |
| 835 | // regions formed by them. |
| 836 | addPass(&AMDGPUUnifyDivergentExitNodesID); |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 837 | if (!LateCFGStructurize) { |
| 838 | addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions |
| 839 | } |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 840 | addPass(createSinkingPass()); |
Tom Stellard | a6f24c6 | 2015-12-15 20:55:55 +0000 | [diff] [blame] | 841 | addPass(createAMDGPUAnnotateUniformValues()); |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 842 | if (!LateCFGStructurize) { |
| 843 | addPass(createSIAnnotateControlFlowPass()); |
| 844 | } |
Alexander Timofeev | 2ce560f | 2019-07-02 17:59:44 +0000 | [diff] [blame] | 845 | addPass(createLCSSAPass()); |
Tom Stellard | a6f24c6 | 2015-12-15 20:55:55 +0000 | [diff] [blame] | 846 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 847 | return false; |
| 848 | } |
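// A hand-written sketch of what SIAnnotateControlFlow makes of a divergent
// branch (wave64 mask type; value names are illustrative):
//
//   ; before annotation
//   br i1 %cmp, label %then, label %endif
//
//   ; after annotation
//   %r    = call { i1, i64 } @llvm.amdgcn.if(i1 %cmp)
//   %cond = extractvalue { i1, i64 } %r, 0
//   %mask = extractvalue { i1, i64 } %r, 1
//   br i1 %cond, label %then, label %endif
//   ...
//   endif:
//     call void @llvm.amdgcn.end.cf(i64 %mask)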
| 849 | |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 850 | void GCNPassConfig::addMachineSSAOptimization() { |
| 851 | TargetPassConfig::addMachineSSAOptimization(); |
| 852 | |
| 853 | // We want to fold operands after PeepholeOptimizer has run (or as part of |
| 854 | // it), because it will eliminate extra copies, making it easier to fold the |
| 855 | // real source operand. We want to eliminate dead instructions after, so that |
| 856 | // we see fewer uses of the copies. We then need to clean up the dead |
| 857 | // instructions left over after the operands are folded as well. |
| 858 | // |
| 859 | // XXX - Can we get away without running DeadMachineInstructionElim again? |
| 860 | addPass(&SIFoldOperandsID); |
Valery Pykhtin | 3d9afa2 | 2018-11-30 14:21:56 +0000 | [diff] [blame] | 861 | if (EnableDPPCombine) |
| 862 | addPass(&GCNDPPCombineID); |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 863 | addPass(&DeadMachineInstructionElimID); |
Tom Stellard | c2ff0eb | 2016-08-29 19:15:22 +0000 | [diff] [blame] | 864 | addPass(&SILoadStoreOptimizerID); |
Sam Kolton | 6e79529 | 2017-04-07 10:53:12 +0000 | [diff] [blame] | 865 | if (EnableSDWAPeephole) { |
| 866 | addPass(&SIPeepholeSDWAID); |
Matthias Braun | 4a7c8e7 | 2018-01-19 06:46:10 +0000 | [diff] [blame] | 867 | addPass(&EarlyMachineLICMID); |
Stanislav Mekhanoshin | 56ea488 | 2017-05-30 16:49:24 +0000 | [diff] [blame] | 868 | addPass(&MachineCSEID); |
| 869 | addPass(&SIFoldOperandsID); |
Sam Kolton | 6e79529 | 2017-04-07 10:53:12 +0000 | [diff] [blame] | 870 | addPass(&DeadMachineInstructionElimID); |
| 871 | } |
Stanislav Mekhanoshin | 0330660 | 2017-06-03 17:39:47 +0000 | [diff] [blame] | 872 | addPass(createSIShrinkInstructionsPass()); |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 873 | } |
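// A sketch of the kind of fold this ordering enables (hand-written MIR, not
// from a real run): once the peephole optimizer has cleaned up copies,
//
//   %0:sreg_32 = S_MOV_B32 42
//   %1:vgpr_32 = COPY %0
//   %2:vgpr_32 = V_ADD_U32_e32 %1, %3, implicit $exec
//
// lets SIFoldOperands fold the immediate through the copy, after which
// dead-instruction elimination removes %0 and %1:
//
//   %2:vgpr_32 = V_ADD_U32_e32 42, %3, implicit $exec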
| 874 | |
Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 875 | bool GCNPassConfig::addILPOpts() { |
| 876 | if (EnableEarlyIfConversion) |
| 877 | addPass(&EarlyIfConverterID); |
| 878 | |
| 879 | TargetPassConfig::addILPOpts(); |
| 880 | return false; |
| 881 | } |
| 882 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 883 | bool GCNPassConfig::addInstSelector() { |
| 884 | AMDGPUPassConfig::addInstSelector(); |
Matt Arsenault | 782c03b | 2015-11-03 22:30:13 +0000 | [diff] [blame] | 885 | addPass(&SIFixSGPRCopiesID); |
Nicolai Haehnle | 814abb5 | 2018-10-31 13:27:08 +0000 | [diff] [blame] | 886 | addPass(createSILowerI1CopiesPass()); |
Ron Lieberman | cac749a | 2018-11-16 01:13:34 +0000 | [diff] [blame] | 887 | addPass(createSIFixupVectorISelPass()); |
David Stuttard | f77079f | 2019-01-14 11:55:24 +0000 | [diff] [blame] | 888 | addPass(createSIAddIMGInitPass()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 889 | return false; |
| 890 | } |
| 891 | |
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 892 | bool GCNPassConfig::addIRTranslator() { |
| 893 | addPass(new IRTranslator()); |
| 894 | return false; |
| 895 | } |
| 896 | |
Tim Northover | 33b07d6 | 2016-07-22 20:03:43 +0000 | [diff] [blame] | 897 | bool GCNPassConfig::addLegalizeMachineIR() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 898 | addPass(new Legalizer()); |
Tim Northover | 33b07d6 | 2016-07-22 20:03:43 +0000 | [diff] [blame] | 899 | return false; |
| 900 | } |
| 901 | |
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 902 | bool GCNPassConfig::addRegBankSelect() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 903 | addPass(new RegBankSelect()); |
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 904 | return false; |
| 905 | } |
Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 906 | |
| 907 | bool GCNPassConfig::addGlobalInstructionSelect() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 908 | addPass(new InstructionSelect()); |
Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 909 | return false; |
| 910 | } |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 911 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 912 | void GCNPassConfig::addPreRegAlloc() { |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 913 | if (LateCFGStructurize) { |
| 914 | addPass(createAMDGPUMachineCFGStructurizerPass()); |
| 915 | } |
Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 916 | addPass(createSIWholeQuadModePass()); |
Matt Arsenault | b87fc22 | 2015-10-01 22:10:03 +0000 | [diff] [blame] | 917 | } |
| 918 | |
Matt Arsenault | cf55a65 | 2019-03-19 19:33:12 +0000 | [diff] [blame] | 919 | void GCNPassConfig::addFastRegAlloc() { |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 920 | // FIXME: We have to disable the verifier here because PHIElimination and |
| 921 | // TwoAddressInstructions disable it. |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 922 | |
| 923 | // This must be run immediately after phi elimination and before |
| 924 | // TwoAddressInstructions; otherwise, the processing of the tied operand of |
| 925 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
| 926 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 927 | |
Neil Henning | 0a30f33 | 2019-04-01 15:19:52 +0000 | [diff] [blame] | 928 | // This must be run just after the register coalescer. |
| 929 | insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false); |
Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 930 | |
Matt Arsenault | cf55a65 | 2019-03-19 19:33:12 +0000 | [diff] [blame] | 931 | TargetPassConfig::addFastRegAlloc(); |
Matt Arsenault | b87fc22 | 2015-10-01 22:10:03 +0000 | [diff] [blame] | 932 | } |
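// insertPass() anchors a pass relative to one that the generic pipeline will
// add later; its declaration in TargetPassConfig.h is roughly the following,
// so the trailing 'false' above disables the post-pass verifier:
//
//   void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
//                   bool VerifyAfter = true, bool PrintAfter = true);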
| 933 | |
Matt Arsenault | cf55a65 | 2019-03-19 19:33:12 +0000 | [diff] [blame] | 934 | void GCNPassConfig::addOptimizedRegAlloc() { |
Matt Arsenault | 4d47ac3 | 2019-03-27 16:58:30 +0000 | [diff] [blame] | 935 | if (OptExecMaskPreRA) { |
| 936 | insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID); |
| 937 | insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID); |
| 938 | } else { |
| 939 | insertPass(&MachineSchedulerID, &SIFormMemoryClausesID); |
| 940 | } |
Stanislav Mekhanoshin | 739174c | 2018-05-31 20:13:51 +0000 | [diff] [blame] | 941 | |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 942 | // This must be run immediately after phi elimination and before |
| 943 | // TwoAddressInstructions; otherwise, the processing of the tied operand of |
| 944 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
| 945 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 946 | |
Neil Henning | 0a30f33 | 2019-04-01 15:19:52 +0000 | [diff] [blame] | 947 | // This must be run just after the register coalescer. |
| 948 | insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false); |
Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 949 | |
Stanislav Mekhanoshin | c8f78f8 | 2019-04-05 20:11:32 +0000 | [diff] [blame] | 950 | if (EnableDCEInRA) |
| 951 | insertPass(&RenameIndependentSubregsID, &DeadMachineInstructionElimID); |
| 952 | |
Matt Arsenault | cf55a65 | 2019-03-19 19:33:12 +0000 | [diff] [blame] | 953 | TargetPassConfig::addOptimizedRegAlloc(); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 954 | } |
| 955 | |
Stanislav Mekhanoshin | c29d491 | 2019-05-01 16:40:49 +0000 | [diff] [blame] | 956 | bool GCNPassConfig::addPreRewrite() { |
| 957 | if (EnableRegReassign) { |
| 958 | addPass(&GCNNSAReassignID); |
Stanislav Mekhanoshin | 3b7925f | 2019-05-01 16:49:31 +0000 | [diff] [blame] | 959 | addPass(&GCNRegBankReassignID); |
Stanislav Mekhanoshin | c29d491 | 2019-05-01 16:40:49 +0000 | [diff] [blame] | 960 | } |
| 961 | return true; |
| 962 | } |
| 963 | |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 964 | void GCNPassConfig::addPostRegAlloc() { |
Stanislav Mekhanoshin | 22a56f2 | 2017-01-24 17:46:17 +0000 | [diff] [blame] | 965 | addPass(&SIFixVGPRCopiesID); |
Matt Arsenault | 105fc1a | 2018-11-26 17:02:02 +0000 | [diff] [blame] | 966 | if (getOptLevel() > CodeGenOpt::None) |
| 967 | addPass(&SIOptimizeExecMaskingID); |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 968 | TargetPassConfig::addPostRegAlloc(); |
Matt Arsenault | 5b0922f | 2019-07-03 23:32:29 +0000 | [diff] [blame] | 969 | |
| 970 | // Equivalent of PEI for SGPRs. |
| 971 | addPass(&SILowerSGPRSpillsID); |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 972 | } |
| 973 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 974 | void GCNPassConfig::addPreSched2() { |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 975 | } |
| 976 | |
| 977 | void GCNPassConfig::addPreEmitPass() { |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 978 | addPass(createSIMemoryLegalizerPass()); |
| 979 | addPass(createSIInsertWaitcntsPass()); |
| 980 | addPass(createSIShrinkInstructionsPass()); |
Tim Corringham | 4c4d2fe | 2018-12-10 12:06:10 +0000 | [diff] [blame] | 981 | addPass(createSIModeRegisterPass()); |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 982 | |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 983 | // The hazard recognizer that runs as part of the post-ra scheduler does not |
Matt Arsenault | 254a645 | 2016-06-28 16:59:53 +0000 | [diff] [blame] | 984 | // guarantee to be able to handle all hazards correctly. This is because if there |
| 985 | // are multiple scheduling regions in a basic block, the regions are scheduled |
| 986 | // bottom up, so when we begin to schedule a region we don't know what |
| 987 | // instructions were emitted directly before it. |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 988 | // |
Matt Arsenault | 254a645 | 2016-06-28 16:59:53 +0000 | [diff] [blame] | 989 | // Here we add a stand-alone hazard recognizer pass which can handle all |
| 990 | // cases. |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 991 | // |
| 992 | // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions, as |
| 993 | // needed. It would be better for it to emit S_NOP <N> when possible. |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 994 | addPass(&PostRAHazardRecognizerID); |
| 995 | |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 996 | addPass(&SIInsertSkipsPassID); |
Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 997 | addPass(&BranchRelaxationPassID); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 998 | } |
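// An illustrative shape of that FIXME (instruction choice and wait-state
// counts are made up; real hazard distances vary by subtarget):
//
//   v_mov_b32 v0, v1
//   s_nop 0
//   s_nop 0
//   v_readlane_b32 s0, v0, 0
//
// where a single "s_nop 1" would do, assuming S_NOP's immediate encodes the
// number of extra wait states beyond the first.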
| 999 | |
| 1000 | TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) { |
Matthias Braun | 5e394c3 | 2017-05-30 21:36:41 +0000 | [diff] [blame] | 1001 | return new GCNPassConfig(*this, PM); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 1002 | } |
Matt Arsenault | bc6d07c | 2019-03-14 22:54:43 +0000 | [diff] [blame] | 1003 | |
| 1004 | yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const { |
| 1005 | return new yaml::SIMachineFunctionInfo(); |
| 1006 | } |
| 1007 | |
| 1008 | yaml::MachineFunctionInfo * |
| 1009 | GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const { |
| 1010 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1011 | return new yaml::SIMachineFunctionInfo(*MFI, |
| 1012 | *MF.getSubtarget().getRegisterInfo()); |
| 1013 | } |
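// In a .mir file this information appears under a machineFunctionInfo block;
// a minimal sketch, assuming the upstream YAML key spellings:
//
//   machineFunctionInfo:
//     isEntryFunction:      true
//     scratchRSrcReg:       '$sgpr0_sgpr1_sgpr2_sgpr3'
//     scratchWaveOffsetReg: '$sgpr4'
//     frameOffsetReg:       '$sgpr5'
//     stackPtrOffsetReg:    '$sgpr32'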
| 1014 | |
| 1015 | bool GCNTargetMachine::parseMachineFunctionInfo( |
| 1016 | const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS, |
| 1017 | SMDiagnostic &Error, SMRange &SourceRange) const { |
| 1018 | const yaml::SIMachineFunctionInfo &YamlMFI = |
| 1019 | reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_); |
| 1020 | MachineFunction &MF = PFS.MF; |
| 1021 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 1022 | |
| 1023 | MFI->initializeBaseYamlFields(YamlMFI); |
| 1024 | |
| 1025 | auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) { |
| 1026 | if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) { |
| 1027 | SourceRange = RegName.SourceRange; |
| 1028 | return true; |
| 1029 | } |
| 1030 | |
| 1031 | return false; |
| 1032 | }; |
| 1033 | |
| 1034 | auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) { |
| 1035 | // Create a diagnostic for the register string literal. |
| 1036 | const MemoryBuffer &Buffer = |
| 1037 | *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID()); |
| 1038 | Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, |
| 1039 | RegName.Value.size(), SourceMgr::DK_Error, |
| 1040 | "incorrect register class for field", RegName.Value, |
| 1041 | None, None); |
| 1042 | SourceRange = RegName.SourceRange; |
| 1043 | return true; |
| 1044 | }; |
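// For a malformed field such as scratchRSrcReg: '$vgpr0', the emitted
// diagnostic would read roughly as follows (rendering is illustrative):
//
//   <mir-file>:1:7: error: incorrect register class for field
//   $vgpr0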
| 1045 | |
| 1046 | if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) || |
| 1047 | parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) || |
| 1048 | parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) || |
| 1049 | parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg)) |
| 1050 | return true; |
| 1051 | |
| 1052 | if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG && |
Matt Arsenault | 12994a7 | 2019-10-10 07:11:33 +0000 | [diff] [blame] | 1053 | !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) { |
Matt Arsenault | bc6d07c | 2019-03-14 22:54:43 +0000 | [diff] [blame] | 1054 | return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg); |
| 1055 | } |
| 1056 | |
| 1057 | if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG && |
| 1058 | !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) { |
| 1059 | return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg); |
| 1060 | } |
| 1061 | |
| 1062 | if (MFI->FrameOffsetReg != AMDGPU::FP_REG && |
| 1063 | !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) { |
| 1064 | return diagnoseRegisterClass(YamlMFI.FrameOffsetReg); |
| 1065 | } |
| 1066 | |
| 1067 | if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG && |
| 1068 | !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) { |
| 1069 | return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg); |
| 1070 | } |
| 1071 | |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1072 | auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A, |
| 1073 | const TargetRegisterClass &RC, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1074 | ArgDescriptor &Arg, unsigned UserSGPRs, |
| 1075 | unsigned SystemSGPRs) { |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1076 | // Skip parsing if it's not present. |
| 1077 | if (!A) |
| 1078 | return false; |
| 1079 | |
| 1080 | if (A->IsRegister) { |
| 1081 | unsigned Reg; |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1082 | if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) { |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1083 | SourceRange = A->RegisterName.SourceRange; |
| 1084 | return true; |
| 1085 | } |
| 1086 | if (!RC.contains(Reg)) |
| 1087 | return diagnoseRegisterClass(A->RegisterName); |
| 1088 | Arg = ArgDescriptor::createRegister(Reg); |
| 1089 | } else |
| 1090 | Arg = ArgDescriptor::createStack(A->StackOffset); |
| 1091 | // Check and apply the optional mask. |
| 1092 | if (A->Mask) |
| 1093 | Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue()); |
| 1094 | |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1095 | MFI->NumUserSGPRs += UserSGPRs; |
| 1096 | MFI->NumSystemSGPRs += SystemSGPRs; |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1097 | return false; |
| 1098 | }; |
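// The corresponding YAML maps each ABI argument to a register or a stack
// offset, optionally with a mask; a sketch with assumed key spellings:
//
//   argumentInfo:
//     privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
//     kernargSegmentPtr:    { reg: '$sgpr4_sgpr5' }
//     workGroupIDX:         { reg: '$sgpr6' }
//     workItemIDX:          { reg: '$vgpr0' }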
| 1099 | |
| 1100 | if (YamlMFI.ArgInfo && |
| 1101 | (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer, |
Matt Arsenault | 12994a7 | 2019-10-10 07:11:33 +0000 | [diff] [blame] | 1102 | AMDGPU::SGPR_128RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1103 | MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1104 | parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1105 | AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr, |
| 1106 | 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1107 | parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1108 | MFI->ArgInfo.QueuePtr, 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1109 | parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr, |
| 1110 | AMDGPU::SReg_64RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1111 | MFI->ArgInfo.KernargSegmentPtr, 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1112 | parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1113 | AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID, |
| 1114 | 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1115 | parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit, |
| 1116 | AMDGPU::SReg_64RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1117 | MFI->ArgInfo.FlatScratchInit, 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1118 | parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize, |
| 1119 | AMDGPU::SGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1120 | MFI->ArgInfo.PrivateSegmentSize, 0, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1121 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1122 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX, |
| 1123 | 0, 1) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1124 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1125 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY, |
| 1126 | 0, 1) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1127 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1128 | AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ, |
| 1129 | 0, 1) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1130 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo, |
| 1131 | AMDGPU::SGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1132 | MFI->ArgInfo.WorkGroupInfo, 0, 1) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1133 | parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset, |
| 1134 | AMDGPU::SGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1135 | MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1136 | parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr, |
| 1137 | AMDGPU::SReg_64RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1138 | MFI->ArgInfo.ImplicitArgPtr, 0, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1139 | parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr, |
| 1140 | AMDGPU::SReg_64RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1141 | MFI->ArgInfo.ImplicitBufferPtr, 2, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1142 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX, |
| 1143 | AMDGPU::VGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1144 | MFI->ArgInfo.WorkItemIDX, 0, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1145 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY, |
| 1146 | AMDGPU::VGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1147 | MFI->ArgInfo.WorkItemIDY, 0, 0) || |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1148 | parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ, |
| 1149 | AMDGPU::VGPR_32RegClass, |
Michael Liao | b3f967d | 2019-07-16 15:57:12 +0000 | [diff] [blame] | 1150 | MFI->ArgInfo.WorkItemIDZ, 0, 0))) |
Michael Liao | 80177ca | 2019-07-03 02:00:21 +0000 | [diff] [blame] | 1151 | return true; |
| 1152 | |
Matt Arsenault | 58426a3 | 2019-07-10 16:09:26 +0000 | [diff] [blame] | 1153 | MFI->Mode.IEEE = YamlMFI.Mode.IEEE; |
| 1154 | MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp; |
Matt Arsenault | 19e7f8a | 2019-10-27 23:38:52 -0700 | [diff] [blame] | 1155 | MFI->Mode.FP32Denormals = YamlMFI.Mode.FP32Denormals; |
| 1156 | MFI->Mode.FP64FP16Denormals = YamlMFI.Mode.FP64FP16Denormals; |
Matt Arsenault | 58426a3 | 2019-07-10 16:09:26 +0000 | [diff] [blame] | 1157 | |
Matt Arsenault | bc6d07c | 2019-03-14 22:54:43 +0000 | [diff] [blame] | 1158 | return false; |
| 1159 | } |
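// The mode block mirrors the four fields copied above; sketched YAML with
// assumed key spellings:
//
//   mode:
//     ieee:                true
//     dx10-clamp:          true
//     fp32-denormals:      false
//     fp64-fp16-denormals: true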