//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
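// As a rough sketch (illustrative IR, not taken from an actual test), the
// vector path turns
//
//   %alloca = alloca [4 x float]
//   %gep = getelementptr inbounds [4 x float], [4 x float]* %alloca,
//                                 i32 0, i32 %idx
//   store float %v, float* %gep
//
// into insertelement/extractelement operations on a <4 x float> value, while
// the LDS path replaces the alloca with a per-workitem slot of a
// [WorkGroupSize x <ty>] addrspace(3) global indexed by the linearized
// workitem ID.
//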
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h" // For cl::opt below; added as it is used here.
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));
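// As a usage note: a cl::opt registers with LLVM's global command-line
// parser, so any tool that runs this pass can set the flag, e.g.
// `llc -disable-promote-alloca-to-vector` (illustrative invocation).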

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
226 Function *DispatchPtrFn
227 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
228
229 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
Reid Klecknerb5180542017-03-21 16:57:19 +0000230 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
231 DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
Matt Arsenaulte0132462016-01-30 05:19:45 +0000232
233 // Size of the dispatch packet struct.
Reid Klecknerb5180542017-03-21 16:57:19 +0000234 DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);
Matt Arsenaulte0132462016-01-30 05:19:45 +0000235
236 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
237 Value *CastDispatchPtr = Builder.CreateBitCast(
Matt Arsenault0da63502018-08-31 05:49:54 +0000238 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
Matt Arsenaulte0132462016-01-30 05:19:45 +0000239
240 // We could do a single 64-bit load here, but it's likely that the basic
241 // 32-bit and extract sequence is already present, and it is probably easier
242 // to CSE this. The loads should be mergable later anyway.
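  // Byte layout, from the struct above: the i32 at offset 4 packs
  // workgroup_size_x | (workgroup_size_y << 16), and the i32 at offset 8
  // packs workgroup_size_z | (reserved0 << 16).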
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

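// As an illustrative note: for a GEP of the canonical two-index form
//   getelementptr [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
// this returns %idx; anything with a non-zero first index or a different
// operand count is rejected below.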
static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Returns false for instructions that are not handled below when turning the
// alloca into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the pointer operand is a GEP.
    // We also cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, and since the
    // IR should be in canonical form, the user should be a GEP.
    // We also cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [2 x [2 x i32]] or equivalent.
  // Potentially these could also be promoted, but we don't currently handle
  // this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the stored value rather than
      // the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived from
    // the same alloca that is being promoted (a constant null is also
    // accepted).
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to the maximum wave count.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint, but ignore it if it's obviously wrong from the existing
  // LDS usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

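  // For example (illustrative numbers, not from a specific target): promoting
  // an alloca of [4 x i32] (16 bytes) with a maximum flat workgroup size of
  // 256 reserves 256 * 16 = 4096 bytes of LDS below.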
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

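  // Give each workitem in the workgroup its own copy of the alloca: the
  // replacement is a [WorkGroupSize x AllocaTy] LDS array, which each
  // workitem indexes with its linearized ID computed below.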
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

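  // Linearize the workitem ID: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ
  // + TIdZ, i.e. the standard row-major flattening of the 3D ID with x
  // outermost.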
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}