//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
63bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
64 CallingConv::ID CC = F.getCallingConv();
65 if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
66 return false;
67
68 auto &TPC = getAnalysis<TargetPassConfig>();
69
70 const TargetMachine &TM = TPC.getTM<TargetMachine>();
Tom Stellard5bfbae52018-07-11 20:59:01 +000071 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
Matt Arsenault8c4a3522018-06-26 19:10:00 +000072 LLVMContext &Ctx = F.getParent()->getContext();
73 const DataLayout &DL = F.getParent()->getDataLayout();
74 BasicBlock &EntryBlock = *F.begin();
75 IRBuilder<> Builder(&*EntryBlock.begin());
76
Matt Arsenaultf5be3ad2018-06-29 17:31:42 +000077 const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
Matt Arsenault8c4a3522018-06-26 19:10:00 +000078 const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);
79
  unsigned MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

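  // Insert a single call to the kernarg segment pointer intrinsic; every
  // argument access below becomes a load at a fixed offset from this base.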
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, nullptr,
                              F.getName() + ".kernarg.segment");

  KernArgSegment->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
  KernArgSegment->addAttribute(AttributeList::ReturnIndex,
    Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
  uint64_t ExplicitArgOffset = 0;

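  // Walk the explicit kernel arguments, computing each one's byte offset in
  // the kernarg segment and replacing its uses with a load from that offset.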
  for (Argument &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(ArgTy);
    unsigned Size = DL.getTypeSizeInBits(ArgTy);
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);

    // Clover seems to always pad i8/i16 to i32, but doesn't properly align
    // them?
    // Make sure the struct elements have correct size and alignment for ext
    // args. These seem to be padded up to 4-bytes but not correctly aligned.
    bool IsExtArg = AllocSize < 32 && (Arg.hasZExtAttr() || Arg.hasSExtAttr()) &&
                    !ST.isAmdHsaOS();
    if (IsExtArg)
      AllocSize = 4;

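    // EltOffset is this argument's byte offset from the kernarg base pointer;
    // ExplicitArgOffset tracks the running offset used to place the next
    // argument.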
    uint64_t EltOffset = alignTo(ExplicitArgOffset, Align) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    if (Arg.use_empty())
      continue;

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if (PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
          ST.getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

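    // 3-element vectors (of at least dword size) are loaded as 4-element
    // vectors and shuffled back down to 3 elements after the load.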
    VectorType *VT = dyn_cast<VectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    unsigned AdjustedAlign = MinAlign(KernArgBaseAlign, AlignDownOffset);

    Value *ArgPtr;
    if (Size < 32 && !ArgTy->isAggregateType()) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        AlignDownOffset,
        Arg.getName() + ".kernarg.offset.align.down");
      ArgPtr = Builder.CreateBitCast(ArgPtr,
                                     Builder.getInt32Ty()->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        AlignDownOffset,
        Arg.getName() + ".kernarg.offset");
      ArgPtr = Builder.CreateBitCast(ArgPtr, ArgTy->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    }

    assert((!IsExtArg || !IsV3) && "incompatible situation");

    if (IsV3 && Size >= 32) {
      V4Ty = VectorType::get(VT->getVectorElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads.
      ArgPtr = Builder.CreateBitCast(ArgPtr, V4Ty->getPointerTo(AS));
    }

    LoadInst *Load = Builder.CreateAlignedLoad(ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

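    // Carry pointer argument attributes (nonnull, dereferenceable,
    // dereferenceable_or_null, align) over to the load as equivalent metadata.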
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      unsigned ParamAlign = Arg.getParamAlignment();
      if (ParamAlign != 0) {
        Load->setMetadata(
          LLVMContext::MD_align,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          ParamAlign))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (Size < 32 && !ArgTy->isAggregateType()) {
      if (IsExtArg && OffsetDiff == 0) {
        Type *I32Ty = Builder.getInt32Ty();
        bool IsSext = Arg.hasSExtAttr();
        Metadata *LowAndHigh[] = {
          ConstantAsMetadata::get(
            ConstantInt::get(I32Ty, IsSext ? minIntN(Size) : 0)),
          ConstantAsMetadata::get(
            ConstantInt::get(I32Ty,
                             IsSext ? maxIntN(Size) + 1 : maxUIntN(Size) + 1))
        };

        Load->setMetadata(LLVMContext::MD_range, MDNode::get(Ctx, LowAndHigh));
      }

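      // The load was widened to an i32 at the aligned-down address; if the
      // argument did not start at that dword boundary, shift the loaded value
      // right to recover its bytes, then truncate to the original width.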
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, UndefValue::get(V4Ty),
                                                {0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

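  // Record the alignment of the kernarg segment pointer, taking the most
  // aligned argument into account.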
  KernArgSegment->addAttribute(
    AttributeList::ReturnIndex,
    Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}