//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  MDNode *MaxWorkGroupSizeRange = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

public:
  static char ID;

  AMDGPUPromoteAlloca(const TargetMachine *TM_ = nullptr) :
    FunctionPass(ID), TM(TM_) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  void handleAlloca(AllocaInst &I);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_TM_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                   "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  if (!TM)
    return false;

  Mod = &M;
  DL = &Mod->getDataLayout();

  // The maximum workitem id.
  //
  // FIXME: Should get as subtarget property. Usually runtime enforced max is
  // 256.
  MDBuilder MDB(Mod->getContext());
  MaxWorkGroupSizeRange = MDB.createRange(APInt(32, 0), APInt(32, 2048));
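  // Range metadata is half-open, so this asserts each workitem id is in
  // [0, 2048).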

  const Triple &TT = TM->getTargetTriple();

  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(
    dbgs() << F.getName() << " uses " << CurrentLocalMemUsage << " bytes of LDS\n"
           << "  Rounding size to " << MaxSizeWithWaveCount
           << " with a maximum occupancy of " << MaxOccupancy << '\n'
           << " and " << (LocalMemLimit - CurrentLocalMemUsage)
           << " available for promotion\n"
  );

  BasicBlock &EntryBB = *F.begin();
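  // handleAlloca may erase the alloca and insert new instructions, so advance
  // the iterator before visiting each candidate.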
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      handleAlloca(*AI);
  }

  return true;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    LocalSizeY->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
    LocalSizeZ->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
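  // i32 index 1 is byte offset 4, i.e. {workgroup_size_x, workgroup_size_y};
  // i32 index 2 is byte offset 8, i.e. {workgroup_size_z, reserved0}.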
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  CI->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);

  return CI;
}

static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: we only support simple cases.
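  // The only form handled is a GEP directly on the alloca:
  //   getelementptr [N x T], [N x T]* %alloca, i32 0, i32 %idx
  // Operand 1 must be a constant zero; operand 2 becomes the vector lane.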
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Returns false if Inst is not an instruction the vector promotion below
// knows how to rewrite.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value.
    StoreInst *SI = cast<StoreInst>(Inst);
    return SI->getPointerOperand() == User;
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays; we
  // are just being conservative for now.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getNumElements() > 4 ||
      AllocaTy->getNumElements() < 2) {
    DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << "  Converting alloca to vector "
               << *AllocaTy << " -> " << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
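      // A scalar load of one array element becomes: bitcast the alloca to a
      // pointer to the whole vector, load the vector, and extractelement at
      // the lane computed from the GEP.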
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = Inst->getOperand(0);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
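      // A scalar store becomes a read-modify-write of the whole vector: load
      // the vector, insertelement the stored value, and store it back.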
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);

      Value *Ptr = Inst->getOperand(1);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       Inst->getOperand(0),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that its other operand is derived from
    // the same alloca (or is a null pointer constant) and will also be
    // promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
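    // Recurse into this user's own users; a failure anywhere aborts promotion
    // of the entire alloca.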
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I)) {
    DEBUG(dbgs() << "  Promoted alloca to vector.\n");
    return;
  }

  const Function &ContainingFunction = *I.getParent()->getParent();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  if (AMDGPU::isShader(ContainingFunction.getCallingConv()))
    return;

  const AMDGPUSubtarget &ST =
    TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  // FIXME: We should also try to get this value from the reqd_work_group_size
  // function attribute if it is available.
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
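  // Every workitem needs its own copy, so the LDS cost is the per-thread
  // allocation size scaled by the maximum workgroup size.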
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
                 << " bytes of local memory not available to promote\n");
    return;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

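  // Replace the private alloca with a single LDS array that has one element
  // per workitem; each thread addresses its own element by its linearized
  // workitem id.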
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

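  // Linearize the 3D workitem id:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ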
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
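      // For an icmp, only null pointer constants need retyping to the LDS
      // address space; an alloca-derived operand is rewritten when it is
      // visited in this worklist.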
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The addrspacecast's operand is corrected on its own, and we don't
      // want to touch its users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
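      // llvm.objectsize is overloaded on its pointer operand's type, so fetch
      // a declaration for the LDS pointer type and rebuild the call.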
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall
        = Builder.CreateCall(ObjectSize, { Src, Intr->getOperand(1) });
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}

FunctionPass *llvm::createAMDGPUPromoteAlloca(const TargetMachine *TM) {
  return new AMDGPUPromoteAlloca(TM);
}