//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
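// As an illustrative sketch (not taken from a specific test), a static
// entry-block alloca such as:
//
//   %stack = alloca [4 x i32], align 4
//
// is either rewritten so its loads and stores become extractelement and
// insertelement operations on a <4 x i32> value, or replaced with this
// workitem's slice of an LDS (addrspace(3)) global sized for the maximum
// workgroup size.
//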
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  AMDGPUAS AS;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a pointer derived from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices of an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
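  ///
  /// Illustrative sketch: for
  ///   %sel = select i1 %cond, i32* %p.alloca, i32* %other
  /// with Val == %p.alloca, this returns true only when %other is null or is
  /// derived from the same BaseAlloca.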
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  AS = AMDGPU::getAMDGPUAS(*F.getParent());

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

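    // Advance the iterator first; handleAlloca() may erase the instruction.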
    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  //   #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  //   #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  //   #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  //   #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t;
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AS.CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
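  //
  // Sketch of the layout relied on below: i32 word 1 of the packet packs
  // workgroup_size_x (low 16 bits) with workgroup_size_y (high 16 bits), and
  // i32 word 2 packs workgroup_size_z (low 16 bits) with reserved0, expected
  // to be zero, in its high 16 bits.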
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
      *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;

  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
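  //
  // Illustrative sketch of the only GEP shape accepted here, for an alloca of
  // type [4 x i32]:
  //
  //   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
  //
  // The returned vector index is %idx; any other shape, or a nonzero first
  // index, is rejected.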
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(Inst);
    // Currently we only handle the case where the pointer operand is a GEP,
    // so check for that.
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // The use must be the stored pointer operand, not a stored value; and
    // since the IR should be in canonical form, the user should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) &&
           !SI->isVolatile();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getElementType()->isArrayTy() ||
      AllocaTy->getNumElements() > 4 ||
      AllocaTy->getNumElements() < 2) {
    DEBUG(dbgs() << " Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << " Converting alloca to vector "
               << *AllocaTy << " -> " << *VectorTy << '\n');

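  // Illustrative sketch of the rewrite performed below: a load such as
  //
  //   %gep = getelementptr [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
  //   %val = load i32, i32* %gep
  //
  // becomes a load of the whole <4 x i32> vector plus an extractelement at
  // %idx; stores analogously become load + insertelement + store.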
  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the value being stored rather
      // than the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a pointer comparison if we know the other compared operand
    // is derived from the same alloca (or is a null pointer constant).
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is
    // derived from the same alloca (or is a null pointer constant).
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promotion to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(
    dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
           << " bytes of LDS\n"
           << "  Rounding size to " << MaxSizeWithWaveCount
           << " with a maximum occupancy of " << MaxOccupancy << '\n'
           << " and " << (LocalMemLimit - CurrentLocalMemUsage)
           << " available for promotion\n"
  );

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions, as the
  // workitem ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    DEBUG(dbgs() << " promote alloca to LDS not supported with calling "
                    "convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST =
      TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << " " << AllocSize
                 << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

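  // Each workitem gets its own copy of the alloca: the replacement global is
  // an array with one element per workitem in the maximum flat workgroup,
  // indexed by the linear workitem ID computed below.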
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

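  // Linearize the 3D workitem ID:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ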
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}