//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

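// The cl::opt flags below are backend developer options. With a typical LLVM
// build they can be passed to llc directly (e.g. "llc -amdgpu-sroa=0") or
// through the clang driver as "-mllvm -amdgpu-sroa=0".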
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

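// Machine scheduler variants. The MachineSchedRegistry objects further below
// register these factories by name, so they can be selected with the
// -misched=<name> option (e.g. -misched=gcn-max-occupancy).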
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);

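// Note on the data layout strings below: "A5" makes address space 5
// (private/scratch) the default alloca address space, and the amdgcn string's
// "-ni:7" marks the buffer-fat-pointer address space 7 as non-integral.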
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

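// Out-of-line storage for the flags exposed above through cl::location
// (-amdgpu-late-structurize and -amdgpu-function-calls).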
bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

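// A function's "target-cpu" and "target-features" attributes, when present,
// override the defaults carried by the TargetMachine.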
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

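  // As early as possible in the optimization pipeline, make the AMDGPU alias
  // analysis available and run the AMDGPU library-call passes (use-native
  // rewriting and, when enabled, simplification).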
  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}

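// Subtargets are created lazily and cached per (GPU, feature string) pair, so
// functions with different "target-cpu"/"target-features" attributes each get
// their own subtarget. GCNTargetMachine::getSubtargetImpl below follows the
// same pattern.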
const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

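  // Base scheduler: the generic live-interval scheduler plus load/store
  // clustering. R600PassConfig and GCNPassConfig both override this below.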
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
| 640 | addPass(createBarrierNoopPass()); |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 641 | |
Matt Arsenault | 0c32938 | 2017-01-30 18:40:29 +0000 | [diff] [blame] | 642 | if (TM.getTargetTriple().getArch() == Triple::amdgcn) { |
| 643 | // TODO: May want to move later or split into an early and late one. |
| 644 | |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 645 | addPass(createAMDGPUCodeGenPreparePass()); |
Matt Arsenault | 0c32938 | 2017-01-30 18:40:29 +0000 | [diff] [blame] | 646 | } |
| 647 | |
Tom Stellard | fd25395 | 2015-08-07 23:19:30 +0000 | [diff] [blame] | 648 | // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments. |
Matt Arsenault | 432aaea | 2018-05-13 10:04:48 +0000 | [diff] [blame] | 649 | if (TM.getTargetTriple().getArch() == Triple::r600) |
| 650 | addPass(createR600OpenCLImageTypeLoweringPass()); |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 651 | |
Yaxun Liu | de4b88d | 2017-10-10 19:39:48 +0000 | [diff] [blame] | 652 | // Replace OpenCL enqueued block function pointers with global variables. |
| 653 | addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass()); |
| 654 | |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 655 | if (TM.getOptLevel() > CodeGenOpt::None) { |
Matt Arsenault | 417e007 | 2017-02-08 06:16:04 +0000 | [diff] [blame] | 656 | addPass(createInferAddressSpacesPass()); |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 657 | addPass(createAMDGPUPromoteAlloca()); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 658 | |
| 659 | if (EnableSROA) |
| 660 | addPass(createSROAPass()); |
Matt Arsenault | f42c692 | 2016-06-15 00:11:01 +0000 | [diff] [blame] | 661 | |
Konstantin Zhuravlyov | 4658e5f | 2016-09-30 16:39:24 +0000 | [diff] [blame] | 662 | addStraightLineScalarOptimizationPasses(); |
Stanislav Mekhanoshin | 8e45acf | 2017-03-17 23:56:58 +0000 | [diff] [blame] | 663 | |
| 664 | if (EnableAMDGPUAliasAnalysis) { |
| 665 | addPass(createAMDGPUAAWrapperPass()); |
| 666 | addPass(createExternalAAWrapperPass([](Pass &P, Function &, |
| 667 | AAResults &AAR) { |
| 668 | if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>()) |
| 669 | AAR.addAAResult(WrapperPass->getResult()); |
| 670 | })); |
| 671 | } |
Konstantin Zhuravlyov | 4658e5f | 2016-09-30 16:39:24 +0000 | [diff] [blame] | 672 | } |
Matt Arsenault | f42c692 | 2016-06-15 00:11:01 +0000 | [diff] [blame] | 673 | |
| 674 | TargetPassConfig::addIRPasses(); |
| 675 | |
| 676 | // EarlyCSE is not always strong enough to clean up what LSR produces. For |
| 677 | // example, GVN can combine |
| 678 | // |
| 679 | // %0 = add %a, %b |
| 680 | // %1 = add %b, %a |
| 681 | // |
| 682 | // and |
| 683 | // |
| 684 | // %0 = shl nsw %a, 2 |
| 685 | // %1 = shl %a, 2 |
| 686 | // |
| 687 | // but EarlyCSE can do neither of them. |
| 688 | if (getOptLevel() != CodeGenOpt::None) |
| 689 | addEarlyCSEOrGVNPass(); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 690 | } |
| 691 | |
Matt Arsenault | 908b9e2 | 2016-07-01 03:33:52 +0000 | [diff] [blame] | 692 | void AMDGPUPassConfig::addCodeGenPrepare() { |
Aakanksha Patil | c56d2af | 2019-03-07 00:54:04 +0000 | [diff] [blame] | 693 | if (TM->getTargetTriple().getArch() == Triple::amdgcn) |
| 694 | addPass(createAMDGPUAnnotateKernelFeaturesPass()); |
| 695 | |
Matt Arsenault | 8c4a352 | 2018-06-26 19:10:00 +0000 | [diff] [blame] | 696 | if (TM->getTargetTriple().getArch() == Triple::amdgcn && |
| 697 | EnableLowerKernelArguments) |
| 698 | addPass(createAMDGPULowerKernelArgumentsPass()); |
| 699 | |
Matt Arsenault | 908b9e2 | 2016-07-01 03:33:52 +0000 | [diff] [blame] | 700 | TargetPassConfig::addCodeGenPrepare(); |
| 701 | |
| 702 | if (EnableLoadStoreVectorizer) |
| 703 | addPass(createLoadStoreVectorizerPass()); |
| 704 | } |
| 705 | |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 706 | bool AMDGPUPassConfig::addPreISel() { |
Sameer Sahasrabuddhe | b4f2d1c | 2018-09-25 09:39:21 +0000 | [diff] [blame] | 707 | addPass(createLowerSwitchPass()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 708 | addPass(createFlattenCFGPass()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 709 | return false; |
| 710 | } |
| 711 | |
| 712 | bool AMDGPUPassConfig::addInstSelector() { |
Matt Arsenault | 7016f13 | 2017-08-03 22:30:46 +0000 | [diff] [blame] | 713 | addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel())); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 714 | return false; |
| 715 | } |
| 716 | |
Matt Arsenault | 0a10900 | 2015-09-25 17:41:20 +0000 | [diff] [blame] | 717 | bool AMDGPUPassConfig::addGCPasses() { |
| 718 | // Do nothing. GC is not supported. |
| 719 | return false; |
| 720 | } |
| 721 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 722 | //===----------------------------------------------------------------------===// |
| 723 | // R600 Pass Setup |
| 724 | //===----------------------------------------------------------------------===// |
| 725 | |
| 726 | bool R600PassConfig::addPreISel() { |
| 727 | AMDGPUPassConfig::addPreISel(); |
Matt Arsenault | c581611 | 2016-06-24 06:30:22 +0000 | [diff] [blame] | 728 | |
| 729 | if (EnableR600StructurizeCFG) |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 730 | addPass(createStructurizeCFGPass()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 731 | return false; |
| 732 | } |
| 733 | |
Tom Stellard | 2028769 | 2017-08-08 04:57:55 +0000 | [diff] [blame] | 734 | bool R600PassConfig::addInstSelector() { |
| 735 | addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel())); |
| 736 | return false; |
| 737 | } |
| 738 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 739 | void R600PassConfig::addPreRegAlloc() { |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 740 | addPass(createR600VectorRegMerger()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 741 | } |
| 742 | |
| 743 | void R600PassConfig::addPreSched2() { |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 744 | addPass(createR600EmitClauseMarkers(), false); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 745 | if (EnableR600IfConvert) |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 746 | addPass(&IfConverterID, false); |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 747 | addPass(createR600ClauseMergePass(), false); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 748 | } |
| 749 | |
| 750 | void R600PassConfig::addPreEmitPass() { |
| 751 | addPass(createAMDGPUCFGStructurizerPass(), false); |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 752 | addPass(createR600ExpandSpecialInstrsPass(), false); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 753 | addPass(&FinalizeMachineBundlesID, false); |
Francis Visoiu Mistrih | 8b61764 | 2017-05-18 17:21:13 +0000 | [diff] [blame] | 754 | addPass(createR600Packetizer(), false); |
| 755 | addPass(createR600ControlFlowFinalizer(), false); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 756 | } |
| 757 | |
| 758 | TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) { |
Matthias Braun | 5e394c3 | 2017-05-30 21:36:41 +0000 | [diff] [blame] | 759 | return new R600PassConfig(*this, PM); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 760 | } |
| 761 | |
| 762 | //===----------------------------------------------------------------------===// |
| 763 | // GCN Pass Setup |
| 764 | //===----------------------------------------------------------------------===// |
| 765 | |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 766 | ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler( |
| 767 | MachineSchedContext *C) const { |
Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 768 | const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>(); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 769 | if (ST.enableSIScheduler()) |
| 770 | return createSIMachineScheduler(C); |
Tom Stellard | 0d23ebe | 2016-08-29 19:42:52 +0000 | [diff] [blame] | 771 | return createGCNMaxOccupancyMachineScheduler(C); |
Matt Arsenault | 03d8584 | 2016-06-27 20:32:13 +0000 | [diff] [blame] | 772 | } |
| 773 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 774 | bool GCNPassConfig::addPreISel() { |
| 775 | AMDGPUPassConfig::addPreISel(); |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 776 | |
Neil Henning | 6641657 | 2018-10-08 15:49:19 +0000 | [diff] [blame] | 777 | if (EnableAtomicOptimizations) { |
| 778 | addPass(createAMDGPUAtomicOptimizerPass()); |
| 779 | } |
| 780 | |
Matt Arsenault | 3931948 | 2015-11-06 18:01:57 +0000 | [diff] [blame] | 781 | // FIXME: We need to run a pass to propagate the attributes when calls are |
| 782 | // supported. |
Matt Arsenault | b8f8dbc | 2017-03-24 19:52:05 +0000 | [diff] [blame] | 783 | |
| 784 | // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit |
| 785 | // regions formed by them. |
| 786 | addPass(&AMDGPUUnifyDivergentExitNodesID); |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 787 | if (!LateCFGStructurize) { |
| 788 | addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions |
| 789 | } |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 790 | addPass(createSinkingPass()); |
Tom Stellard | a6f24c6 | 2015-12-15 20:55:55 +0000 | [diff] [blame] | 791 | addPass(createAMDGPUAnnotateUniformValues()); |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 792 | if (!LateCFGStructurize) { |
| 793 | addPass(createSIAnnotateControlFlowPass()); |
| 794 | } |
Tom Stellard | a6f24c6 | 2015-12-15 20:55:55 +0000 | [diff] [blame] | 795 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 796 | return false; |
| 797 | } |
| 798 | |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 799 | void GCNPassConfig::addMachineSSAOptimization() { |
| 800 | TargetPassConfig::addMachineSSAOptimization(); |
| 801 | |
| 802 | // We want to fold operands after PeepholeOptimizer has run (or as part of |
| 803 | // it), because it will eliminate extra copies, making it easier to fold the
| 804 | // real source operand. We want to eliminate dead instructions afterwards, so
| 805 | // that we see fewer uses of the copies. We then need to clean up the dead
| 806 | // instructions left over after the operands are folded as well.
| 807 | // |
| 808 | // XXX - Can we get away without running DeadMachineInstructionElim again? |
| 809 | addPass(&SIFoldOperandsID); |
Valery Pykhtin | 3d9afa2 | 2018-11-30 14:21:56 +0000 | [diff] [blame] | 810 | if (EnableDPPCombine) |
| 811 | addPass(&GCNDPPCombineID); |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 812 | addPass(&DeadMachineInstructionElimID); |
Tom Stellard | c2ff0eb | 2016-08-29 19:15:22 +0000 | [diff] [blame] | 813 | addPass(&SILoadStoreOptimizerID); |
Sam Kolton | 6e79529 | 2017-04-07 10:53:12 +0000 | [diff] [blame] | 814 | if (EnableSDWAPeephole) { |
| 815 | addPass(&SIPeepholeSDWAID); |
Matthias Braun | 4a7c8e7 | 2018-01-19 06:46:10 +0000 | [diff] [blame] | 816 | addPass(&EarlyMachineLICMID); |
Stanislav Mekhanoshin | 56ea488 | 2017-05-30 16:49:24 +0000 | [diff] [blame] | 817 | addPass(&MachineCSEID); |
| 818 | addPass(&SIFoldOperandsID); |
Sam Kolton | 6e79529 | 2017-04-07 10:53:12 +0000 | [diff] [blame] | 819 | addPass(&DeadMachineInstructionElimID); |
| 820 | } |
Stanislav Mekhanoshin | 0330660 | 2017-06-03 17:39:47 +0000 | [diff] [blame] | 821 | addPass(createSIShrinkInstructionsPass()); |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 822 | } |
| 823 | |
Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 824 | bool GCNPassConfig::addILPOpts() { |
| 825 | if (EnableEarlyIfConversion) |
| 826 | addPass(&EarlyIfConverterID); |
| 827 | |
| 828 | TargetPassConfig::addILPOpts(); |
| 829 | return false; |
| 830 | } |
| 831 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 832 | bool GCNPassConfig::addInstSelector() { |
| 833 | AMDGPUPassConfig::addInstSelector(); |
Matt Arsenault | 782c03b | 2015-11-03 22:30:13 +0000 | [diff] [blame] | 834 | addPass(&SIFixSGPRCopiesID); |
Nicolai Haehnle | 814abb5 | 2018-10-31 13:27:08 +0000 | [diff] [blame] | 835 | addPass(createSILowerI1CopiesPass()); |
Ron Lieberman | cac749a | 2018-11-16 01:13:34 +0000 | [diff] [blame] | 836 | addPass(createSIFixupVectorISelPass()); |
David Stuttard | f77079f | 2019-01-14 11:55:24 +0000 | [diff] [blame] | 837 | addPass(createSIAddIMGInitPass()); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 838 | return false; |
| 839 | } |
| 840 | |
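// The next four overrides wire up the GlobalISel pipeline for GCN:
// IRTranslator builds generic MIR from the LLVM IR, Legalizer lowers illegal
// generic operations, RegBankSelect assigns register banks (SGPR vs. VGPR),
// and InstructionSelect picks the final target instructions.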
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 841 | bool GCNPassConfig::addIRTranslator() { |
| 842 | addPass(new IRTranslator()); |
| 843 | return false; |
| 844 | } |
| 845 | |
Tim Northover | 33b07d6 | 2016-07-22 20:03:43 +0000 | [diff] [blame] | 846 | bool GCNPassConfig::addLegalizeMachineIR() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 847 | addPass(new Legalizer()); |
Tim Northover | 33b07d6 | 2016-07-22 20:03:43 +0000 | [diff] [blame] | 848 | return false; |
| 849 | } |
| 850 | |
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 851 | bool GCNPassConfig::addRegBankSelect() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 852 | addPass(new RegBankSelect()); |
Tom Stellard | 000c5af | 2016-04-14 19:09:28 +0000 | [diff] [blame] | 853 | return false; |
| 854 | } |
Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 855 | |
| 856 | bool GCNPassConfig::addGlobalInstructionSelect() { |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 857 | addPass(new InstructionSelect()); |
Ahmed Bougacha | 6756a2c | 2016-07-27 14:31:55 +0000 | [diff] [blame] | 858 | return false; |
| 859 | } |
Tom Stellard | ca16621 | 2017-01-30 21:56:46 +0000 | [diff] [blame] | 860 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 861 | void GCNPassConfig::addPreRegAlloc() { |
Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 862 | if (LateCFGStructurize) { |
| 863 | addPass(createAMDGPUMachineCFGStructurizerPass()); |
| 864 | } |
Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 865 | addPass(createSIWholeQuadModePass()); |
Matt Arsenault | b87fc22 | 2015-10-01 22:10:03 +0000 | [diff] [blame] | 866 | } |
| 867 | |
| 868 | void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) { |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 869 | // FIXME: We have to disable the verifier here because of PHIElimination + |
| 870 | // TwoAddressInstructions disabling it. |
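// (The trailing 'false' argument to the insertPass calls below is the
// verifyAfter flag, assuming the TargetPassConfig interface of this LLVM
// version; that is how the extra verifier runs are suppressed here.)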
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 871 | |
| 872 | // This must be run immediately after phi elimination and before |
| 873 | // TwoAddressInstructions, otherwise the processing of the tied operand of |
| 874 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
| 875 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 876 | |
Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 877 | // This must be run after SILowerControlFlow, since it needs to use the |
| 878 | // machine-level CFG, but before register allocation. |
| 879 | insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false); |
| 880 | |
Matt Arsenault | b87fc22 | 2015-10-01 22:10:03 +0000 | [diff] [blame] | 881 | TargetPassConfig::addFastRegAlloc(RegAllocPass); |
| 882 | } |
| 883 | |
| 884 | void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) { |
Matt Arsenault | 9d288e6 | 2017-08-07 18:12:48 +0000 | [diff] [blame] | 885 | insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID); |
Stanislav Mekhanoshin | 37e7f95 | 2017-08-01 23:14:32 +0000 | [diff] [blame] | 886 | |
Stanislav Mekhanoshin | 739174c | 2018-05-31 20:13:51 +0000 | [diff] [blame] | 887 | insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID); |
| 888 | |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 889 | // This must be run immediately after phi elimination and before |
| 890 | // TwoAddressInstructions, otherwise the processing of the tied operand of |
| 891 | // SI_ELSE will introduce a copy of the tied operand source after the else. |
| 892 | insertPass(&PHIEliminationID, &SILowerControlFlowID, false); |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 893 | |
Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 894 | // This must be run after SILowerControlFlow, since it needs to use the |
| 895 | // machine-level CFG, but before register allocation. |
| 896 | insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false); |
| 897 | |
Matt Arsenault | b87fc22 | 2015-10-01 22:10:03 +0000 | [diff] [blame] | 898 | TargetPassConfig::addOptimizedRegAlloc(RegAllocPass); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 899 | } |
| 900 | |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 901 | void GCNPassConfig::addPostRegAlloc() { |
Stanislav Mekhanoshin | 22a56f2 | 2017-01-24 17:46:17 +0000 | [diff] [blame] | 902 | addPass(&SIFixVGPRCopiesID); |
Matt Arsenault | 105fc1a | 2018-11-26 17:02:02 +0000 | [diff] [blame] | 903 | if (getOptLevel() > CodeGenOpt::None) |
| 904 | addPass(&SIOptimizeExecMaskingID); |
Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 905 | TargetPassConfig::addPostRegAlloc(); |
| 906 | } |
| 907 | |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 908 | void GCNPassConfig::addPreSched2() { |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 909 | } |
| 910 | |
| 911 | void GCNPassConfig::addPreEmitPass() { |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 912 | addPass(createSIMemoryLegalizerPass()); |
| 913 | addPass(createSIInsertWaitcntsPass()); |
| 914 | addPass(createSIShrinkInstructionsPass()); |
Tim Corringham | 4c4d2fe | 2018-12-10 12:06:10 +0000 | [diff] [blame] | 915 | addPass(createSIModeRegisterPass()); |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 916 | |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 917 | // The hazard recognizer that runs as part of the post-ra scheduler does not |
Matt Arsenault | 254a645 | 2016-06-28 16:59:53 +0000 | [diff] [blame] | 918 | // guarantee to be able to handle all hazards correctly. This is because if there
| 919 | // are multiple scheduling regions in a basic block, the regions are scheduled |
| 920 | // bottom up, so when we begin to schedule a region we don't know what |
| 921 | // instructions were emitted directly before it. |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 922 | // |
Matt Arsenault | 254a645 | 2016-06-28 16:59:53 +0000 | [diff] [blame] | 923 | // Here we add a stand-alone hazard recognizer pass which can handle all |
| 924 | // cases. |
Mark Searles | 72da47d | 2018-07-16 10:02:41 +0000 | [diff] [blame] | 925 | // |
| 926 | // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions, as
| 927 | // needed. It would be better for it to emit S_NOP <N> when possible.
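// (For reference: the S_NOP immediate encodes additional wait states, so a
// single S_NOP <N> can stand in for several back-to-back S_NOP 0s, assuming
// the usual SOPP behavior described in the ISA documentation.)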
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 928 | addPass(&PostRAHazardRecognizerID); |
| 929 | |
Matt Arsenault | 78fc9da | 2016-08-22 19:33:16 +0000 | [diff] [blame] | 930 | addPass(&SIInsertSkipsPassID); |
Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 931 | addPass(&BranchRelaxationPassID); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 932 | } |
| 933 | |
| 934 | TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) { |
Matthias Braun | 5e394c3 | 2017-05-30 21:36:41 +0000 | [diff] [blame] | 935 | return new GCNPassConfig(*this, PM); |
Tom Stellard | 45bb48e | 2015-06-13 03:28:10 +0000 | [diff] [blame] | 936 | } |
Matt Arsenault | bc6d07c | 2019-03-14 22:54:43 +0000 | [diff] [blame] | 937 | |
| 938 | yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const { |
| 939 | return new yaml::SIMachineFunctionInfo(); |
| 940 | } |
| 941 | |
| 942 | yaml::MachineFunctionInfo * |
| 943 | GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const { |
| 944 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 945 | return new yaml::SIMachineFunctionInfo(*MFI, |
| 946 | *MF.getSubtarget().getRegisterInfo()); |
| 947 | } |
| 948 | |
| 949 | bool GCNTargetMachine::parseMachineFunctionInfo( |
| 950 | const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS, |
| 951 | SMDiagnostic &Error, SMRange &SourceRange) const { |
| 952 | const yaml::SIMachineFunctionInfo &YamlMFI = |
| 953 | reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_); |
| 954 | MachineFunction &MF = PFS.MF; |
| 955 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 956 | |
| 957 | MFI->initializeBaseYamlFields(YamlMFI); |
| 958 | |
| 959 | auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) { |
| 960 | if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) { |
| 961 | SourceRange = RegName.SourceRange; |
| 962 | return true; |
| 963 | } |
| 964 | |
| 965 | return false; |
| 966 | }; |
| 967 | |
| 968 | auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) { |
| 969 | // Create a diagnostic for the register string literal.
| 970 | const MemoryBuffer &Buffer = |
| 971 | *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID()); |
| 972 | Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, |
| 973 | RegName.Value.size(), SourceMgr::DK_Error, |
| 974 | "incorrect register class for field", RegName.Value, |
| 975 | None, None); |
| 976 | SourceRange = RegName.SourceRange; |
| 977 | return true; |
| 978 | }; |
| 979 | |
| 980 | if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) || |
| 981 | parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) || |
| 982 | parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) || |
| 983 | parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg)) |
| 984 | return true; |
| 985 | |
| 986 | if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG && |
| 987 | !AMDGPU::SReg_128RegClass.contains(MFI->ScratchRSrcReg)) { |
| 988 | return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg); |
| 989 | } |
| 990 | |
| 991 | if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG && |
| 992 | !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) { |
| 993 | return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg); |
| 994 | } |
| 995 | |
| 996 | if (MFI->FrameOffsetReg != AMDGPU::FP_REG && |
| 997 | !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) { |
| 998 | return diagnoseRegisterClass(YamlMFI.FrameOffsetReg); |
| 999 | } |
| 1000 | |
| 1001 | if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG && |
| 1002 | !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) { |
| 1003 | return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg); |
| 1004 | } |
| 1005 | |
| 1006 | return false; |
| 1007 | } |
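// Illustrative only: the fields parsed above correspond to MIR YAML roughly
// like the following (key names assumed from the yaml::SIMachineFunctionInfo
// mapping; check SIMachineFunctionInfo.h for the authoritative spelling):
//
//   machineFunctionInfo:
//     scratchRSrcReg:       '$sgpr0_sgpr1_sgpr2_sgpr3'
//     scratchWaveOffsetReg: '$sgpr4'
//     frameOffsetReg:       '$sgpr5'
//     stackPtrOffsetReg:    '$sgpr32'
//
// A value that passes parseRegister but names a register of the wrong class
// (e.g. a VGPR for scratchRSrcReg) is rejected via diagnoseRegisterClass.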