//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  AMDGPUAS AS;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

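// Walk the allocas in the function's entry block and try to promote each one,
// first to a vector and, failing that, to LDS.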
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  AS = AMDGPU::getAMDGPUAS(*F.getParent());

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AS.CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

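// Emit a call to the workitem ID intrinsic for dimension N (0 = x, 1 = y,
// 2 = z) and attach range metadata derived from the maximum workgroup size.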
Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;

  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

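// Convert an array type such as [4 x i32] into the corresponding vector type
// <4 x i32>.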
static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

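// Return the vector index previously recorded for this GEP, or null if none
// was recorded.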
static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(Inst);
    // Currently we only handle the case where the pointer operand is a GEP.
    return isa<GetElementPtrInst>(LI->getPointerOperand()) &&
           !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value. In addition,
    // since the IR should be in canonical form, the user should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) &&
           !SI->isVolatile();
  }
  default:
    return false;
  }
}

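// Try to rewrite a small, statically indexed array alloca as a single vector
// value, replacing loads and stores through GEPs with extractelement and
// insertelement operations.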
static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getElementType()->isArrayTy() ||
      AllocaTy->getNumElements() > 4 ||
      AllocaTy->getNumElements() < 2) {
    DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << "  Converting alloca to vector "
               << *AllocaTy << " -> " << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

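// Only a small set of intrinsic calls can have their pointer argument
// rewritten to the new address space; any other call blocks promotion.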
static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other compare operand is
    // derived from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(
    dbgs() << F.getName() << " uses " << CurrentLocalMemUsage << " bytes of LDS\n"
           << "  Rounding size to " << MaxSizeWithWaveCount
           << " with a maximum occupancy of " << MaxOccupancy << '\n'
           << " and " << (LocalMemLimit - CurrentLocalMemUsage)
           << " available for promotion\n"
  );

  return true;
}

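// Promote a single entry-block alloca: first try to turn it into a vector;
// if that fails, try to move it into a per-workgroup LDS array. Returns true
// if the IR was changed.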
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    DEBUG(dbgs() << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST =
    TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
          << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

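  // Create one copy of the alloca for every work item in the group, backed by
  // an LDS array, and index it below with the linearized workitem ID.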
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

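  // Linearize the workitem ID:
  //   TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ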
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}