//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

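// Handler for outgoing return values: each value is extended if needed,
// copied into the physical register chosen by the calling convention, and
// recorded as an implicit use on the return instruction being built.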
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
  }
};

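// Handler for incoming values: copies arguments out of their assigned
// physical registers (or loads them from fixed stack objects) into the
// virtual registers the rest of the function expects.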
struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    Register AddrReg = MRI.createGenericVirtualRegister(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size, 1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isArgumentHandler() const override { return true; }
};

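// Incoming handler used for formal arguments, where each assigned physical
// register becomes a live-in of the current (entry) basic block.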
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

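// Break \p OrigArg into one ArgInfo per legal part register for \p CallConv,
// invoking \p PerformArgSplit for every original value that needs more than
// one part so the caller can stitch the pieces back together.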
void AMDGPUCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    Type *Ty = VT.getTypeForEVT(Ctx);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g. [1 x
      // double] -> double).
      SplitArgs.emplace_back(OrigArg.Regs[SplitIdx], Ty,
                             OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    LLT LLTy = getLLTForType(*Ty, DL);

    SmallVector<Register, 8> SplitRegs;

    EVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
    Type *PartTy = PartVT.getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

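// Scatter the value in \p SrcReg (of type \p SrcTy) into the part registers
// \p DstRegs of type \p PartTy, handling the case where the parts do not
// evenly cover the source by padding with an implicit def.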
// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &MIRBuilder,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = MIRBuilder.buildUnmerge(SrcTy.getElementType(),
                                                  SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      MIRBuilder.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    MIRBuilder.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = MIRBuilder.buildUndef(BigTy);

  Register BigReg = MRI.createGenericVirtualRegister(BigTy);
  MIRBuilder.buildInsert(BigReg, ImpDef.getReg(0), SrcReg, 0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    MIRBuilder.buildExtract(DstRegs[i], BigReg, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
    OrigRetInfo, SplitRetInfos, DL, MRI, CC,
    [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
      unpackRegsToOrigType(MIRBuilder, Regs, VRegs[VTSplitIdx], LLTy, PartLLT);
    });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler);
}

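// Lower an IR return. Kernels, and shaders that return void, simply end the
// wave with S_ENDPGM; other functions build the appropriate return
// instruction and hand the return value off to lowerReturnVal.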
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = MIRBuilder.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    MIRBuilder.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc = IsShader ?
    AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = MIRBuilder.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    MIRBuilder.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  MIRBuilder.insertInstr(Ret);
  return true;
}

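// Compute the address of a kernel argument as an offset from the preloaded
// kernarg segment pointer, in the constant address space.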
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               uint64_t Offset) const {

  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

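// Load the kernel argument of type \p ParamTy at \p Offset in the kernarg
// segment into \p DstReg as a dereferenceable, invariant load.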
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MODereferenceable |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &MIRBuilder,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

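// Kernel arguments are not passed in registers; each one is loaded explicitly
// from the kernarg segment at an offset computed from the DataLayout.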
bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, MIRBuilder, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
        OrigArgRegs.size() == 1
            ? OrigArgRegs[0]
            : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(MIRBuilder, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, MIRBuilder);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

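// Rebuild the original IR-level value in OrigRegs[0] from the part registers
// produced by argument splitting; this is the inverse of unpackRegsToOrigType.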
// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &MIRBuilder,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    MIRBuilder.buildMerge(OrigRegs[0], Regs);
    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(LLTy.getElementType() == PartLLT.getElementType());

    int DstElts = LLTy.getNumElements();
    int PartElts = PartLLT.getNumElements();
    if (DstElts % PartElts == 0)
      MIRBuilder.buildConcatVectors(OrigRegs[0], Regs);
    else {
      // Deal with v3s16 split into v2s16
      assert(PartElts == 2 && DstElts % 2 != 0);
      int RoundedElts = PartElts * ((DstElts + PartElts - 1) / PartElts);

      LLT RoundedDestTy = LLT::vector(RoundedElts, PartLLT.getElementType());
      auto RoundedConcat = MIRBuilder.buildConcatVectors(RoundedDestTy, Regs);
      MIRBuilder.buildExtract(OrigRegs[0], RoundedConcat, 0);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();
  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.
    MIRBuilder.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = MIRBuilder.buildMerge(DstEltTy,
                                         Regs.take_front(PartsPerElt));
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    MIRBuilder.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = MIRBuilder.buildBuildVector(BVType, Regs);
    MIRBuilder.buildTrunc(OrigRegs[0], BV);
  }
}

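// Lower formal arguments for non-kernel functions (shaders and callable
// functions): split each argument into legal part registers, let the calling
// convention assign them, then allocate the special SGPR/VGPR inputs.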
bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(MIRBuilder, F, VRegs);

  // AMDGPU_GS and AMDGPU_HS are not supported yet.
  if (CC == CallingConv::AMDGPU_GS || CC == CallingConv::AMDGPU_HS)
    return false;

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

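  // Non-entry functions receive the return address in an SGPR pair; record it
  // as a block live-in and copy it into a virtual register so it can be
  // restored for the return.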
  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    MIRBuilder.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          MIRBuilder.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(
      OrigArg, SplitArgs, DL, MRI, CC,
      // FIXME: We should probably be passing multiple registers to
      // handleAssignments to do this
      [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        packSplitRegsToOrigType(MIRBuilder, VRegs[Idx][VTSplitIdx], Regs,
                                LLTy, PartLLT);
      });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
          countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

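  // All arguments have been split into part registers; let the generic
  // handleAssignments code place them according to the calling convention.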
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}