//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
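///
/// An illustrative sketch (the offset, address space, and alignment shown are
/// assumed values for a typical HSA target, not guaranteed by the pass):
///
///   define amdgpu_kernel void @f(i32 %x) {
///     %y = add i32 %x, 1
///     ...
///   }
///
/// becomes roughly
///
///   define amdgpu_kernel void @f(i32 %x) {
///     %f.kernarg.segment = call i8 addrspace(4)*
///         @llvm.amdgcn.kernarg.segment.ptr()
///     %x.kernarg.offset = getelementptr inbounds i8,
///         i8 addrspace(4)* %f.kernarg.segment, i64 0
///     %x.kernarg.offset.cast = bitcast i8 addrspace(4)* %x.kernarg.offset
///         to i32 addrspace(4)*
///     %x.load = load i32, i32 addrspace(4)* %x.kernarg.offset.cast,
///         align 16, !invariant.load !0
///     %y = add i32 %x.load, 1
///     ...
///   }
///
/// with all uses of %x rewritten to use %x.load.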
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();

  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&*EntryBlock.begin());

  const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);

  unsigned MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
  KernArgSegment->addAttribute(AttributeList::ReturnIndex,
    Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
  uint64_t ExplicitArgOffset = 0;

  for (Argument &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(ArgTy);
    unsigned Size = DL.getTypeSizeInBits(ArgTy);
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);

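    // EltOffset is where this argument lives in the kernarg segment (the
    // running offset aligned up to the argument's ABI alignment, plus the
    // base offset); ExplicitArgOffset then advances past the argument's
    // allocation size for the next iteration.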
    uint64_t EltOffset = alignTo(ExplicitArgOffset, Align) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    if (Arg.use_empty())
      continue;

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if (PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
          ST.getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    VectorType *VT = dyn_cast<VectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

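    // Round the load offset down to a dword boundary and record how far past
    // that boundary the argument actually starts. These are only used in the
    // sub-dword (DoShiftOpt) case, where the extra low bits are shifted off
    // after the load.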
    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    unsigned AdjustedAlign = MinAlign(DoShiftOpt ? AlignDownOffset : EltOffset,
                                      KernArgBaseAlign);

    Value *ArgPtr;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
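      //
      // For example (illustrative offsets), an i16 argument at byte offset 2
      // is loaded as an i32 from offset 0; the value is then recovered below
      // with "lshr i32 %load, 16" followed by a trunc to i16.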

      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        AlignDownOffset,
        Arg.getName() + ".kernarg.offset.align.down");
      ArgPtr = Builder.CreateBitCast(ArgPtr,
                                     Builder.getInt32Ty()->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        EltOffset,
        Arg.getName() + ".kernarg.offset");
      ArgPtr = Builder.CreateBitCast(ArgPtr, ArgTy->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    }

    if (IsV3 && Size >= 32) {
      V4Ty = VectorType::get(VT->getVectorElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
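      // e.g. a <3 x i32> argument is loaded as <4 x i32> and narrowed back to
      // three elements with the shufflevector below.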
      ArgPtr = Builder.CreateBitCast(ArgPtr, V4Ty->getPointerTo(AS));
    }

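    // The kernarg segment is not written while the kernel executes, so the
    // argument loads can be marked !invariant.load.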
    LoadInst *Load = Builder.CreateAlignedLoad(ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      unsigned ParamAlign = Arg.getParamAlignment();
      if (ParamAlign != 0) {
        Load->setMetadata(
          LLVMContext::MD_align,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          ParamAlign))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, UndefValue::get(V4Ty),
                                                {0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addAttribute(
      AttributeList::ReturnIndex,
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}