//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

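// Handler for outgoing values, currently used only by the shader return path:
// each split value is copied into the physical register chosen by the calling
// convention, and that register is added as a use of the return instruction
// being built.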
struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg);
    MIRBuilder.buildCopy(PhysReg, ValVReg);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
  }
};

}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  if (!Val) {
    MIRBuilder.buildInstr(AMDGPU::S_ENDPGM).addImm(0);
    return true;
  }

  Register VReg = VRegs[0];

  const Function &F = MF.getFunction();
  auto &DL = F.getParent()->getDataLayout();
  if (!AMDGPU::isShader(F.getCallingConv()))
    return false;

  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ArgInfo OrigArg{VReg, Val->getType()};
  setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  SmallVector<ArgInfo, 8> SplitArgs;
  CCAssignFn *AssignFn = CCAssignFnForReturn(F.getCallingConv(), false);
  for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(F.getContext());
    SplitArgs.push_back({VRegs[i], SplitTy, OrigArg.Flags, OrigArg.IsFixed});
  }
  auto RetInstr = MIRBuilder.buildInstrNoInsert(AMDGPU::SI_RETURN_TO_EPILOG);
  OutgoingArgHandler Handler(MIRBuilder, MRI, RetInstr, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;
  MIRBuilder.insertInstr(RetInstr);

  return true;
}

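// Build the address of a kernel argument: offset the preloaded kernarg segment
// pointer by the argument's byte offset, producing a constant-address-space
// pointer.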
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

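// Load a kernel argument of type ParamTy from the kernarg segment at the given
// byte offset into DstReg, using an invariant, non-temporal load.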
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MONonTemporal |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

static Register findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

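// Reserve the fixed VGPRs (VGPR0-VGPR2) that carry the workitem IDs into the
// kernel, mark them live-in with a 32-bit type, and record them in the
// function's argument info.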
static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           const SIRegisterInfo &TRI,
                                           SIMachineFunctionInfo &Info) {
  const LLT S32 = LLT::scalar(32);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.hasWorkItemIDX()) {
    Register Reg = AMDGPU::VGPR0;
    MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDY()) {
    Register Reg = AMDGPU::VGPR1;
    MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDZ()) {
    Register Reg = AMDGPU::VGPR2;
    MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
  }
}

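// Allocate the SGPR holding the scratch wave byte offset. Shaders may already
// have a fixed register for it; if not, the first free SGPR is used. Other
// calling conventions reserve a new system SGPR for it.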
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
          Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

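// Lower incoming formal arguments. Kernel arguments are loaded directly from
// the kernarg segment; VS/PS shader arguments are assigned locations by the
// calling convention and copied from their input registers. Other calling
// conventions are not implemented yet.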
bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  // AMDGPU_GS and AMDGPU_HS are not supported yet.
  if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
      F.getCallingConv() == CallingConv::AMDGPU_HS)
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  bool IsShader = AMDGPU::isShader(F.getCallingConv());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    Register DispatchPtrReg = Info->addDispatchPtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    Register QueuePtrReg = Info->addQueuePtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    Register InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P2);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (F.getCallingConv() == CallingConv::AMDGPU_KERNEL) {
    unsigned i = 0;
    const unsigned KernArgBaseAlign = 16;
    const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
    uint64_t ExplicitArgOffset = 0;

    // TODO: Align down to dword alignment and extract bits for extending loads.
    for (auto &Arg : F.args()) {
      Type *ArgTy = Arg.getType();
      unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
      if (AllocSize == 0)
        continue;

      unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

      uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
      ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

      ArrayRef<Register> OrigArgRegs = VRegs[i];
      Register ArgReg =
          OrigArgRegs.size() == 1
              ? OrigArgRegs[0]
              : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
      unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
      ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
      lowerParameter(MIRBuilder, ArgTy, ArgOffset, Align, ArgReg);
      if (OrigArgRegs.size() > 1)
        unpackRegs(OrigArgRegs, ArgReg, ArgTy, MIRBuilder);
      ++i;
    }

    allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), IsShader);
    return true;
  }

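  // Shader path: run each IR argument through the calling convention to assign
  // a location, tracking which PS inputs are actually used so unused inputs
  // can be skipped.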
  unsigned NumArgs = F.arg_size();
  Function::const_arg_iterator CurOrigArg = F.arg_begin();
  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  unsigned PSInputNum = 0;
  BitVector Skipped(NumArgs);
  for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
    EVT ValEVT = TLI.getValueType(DL, CurOrigArg->getType());

    // We can only handle simple value types at the moment.
    ISD::ArgFlagsTy Flags;
    assert(VRegs[i].size() == 1 && "Can't lower into more than one register");
    ArgInfo OrigArg{VRegs[i][0], CurOrigArg->getType()};
    setArgFlags(OrigArg, i + 1, DL, F);
    Flags.setOrigAlign(DL.getABITypeAlignment(CurOrigArg->getType()));

    if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
        !OrigArg.Flags.isInReg() && !OrigArg.Flags.isByVal() &&
        PSInputNum <= 15) {
      if (CurOrigArg->use_empty() && !Info->isPSInputAllocated(PSInputNum)) {
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (!CurOrigArg->use_empty())
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    CCAssignFn *AssignFn = CCAssignFnForCall(F.getCallingConv(),
                                             /*IsVarArg=*/false);

    if (ValEVT.isVector()) {
      EVT ElemVT = ValEVT.getVectorElementType();
      if (!ValEVT.isSimple())
        return false;
      MVT ValVT = ElemVT.getSimpleVT();
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full,
                          OrigArg.Flags, CCInfo);
      if (!Res)
        return false;
    } else {
      MVT ValVT = ValEVT.getSimpleVT();
      if (!ValEVT.isSimple())
        return false;
      bool Res =
          AssignFn(i, ValVT, ValVT, CCValAssign::Full, OrigArg.Flags, CCInfo);

      // Fail if we don't know how to handle this type.
      if (Res)
        return false;
    }
  }

  Function::const_arg_iterator Arg = F.arg_begin();

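  // For VS/PS, copy each assigned location's register into the corresponding
  // argument vreg, skipping arguments elided above, then allocate the
  // remaining system SGPRs.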
  if (F.getCallingConv() == CallingConv::AMDGPU_VS ||
      F.getCallingConv() == CallingConv::AMDGPU_PS) {
    for (unsigned i = 0, OrigArgIdx = 0;
         OrigArgIdx != NumArgs && i != ArgLocs.size(); ++Arg, ++OrigArgIdx) {
      if (Skipped.test(OrigArgIdx))
        continue;
      assert(VRegs[OrigArgIdx].size() == 1 &&
             "Can't lower into more than 1 reg");
      CCValAssign &VA = ArgLocs[i++];
      MRI.addLiveIn(VA.getLocReg(), VRegs[OrigArgIdx][0]);
      MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
      MIRBuilder.buildCopy(VRegs[OrigArgIdx][0], VA.getLocReg());
    }

    allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), IsShader);
    return true;
  }

  return false;
}