//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val,
                                     ArrayRef<unsigned> VRegs) const {
  // FIXME: Add support for non-void returns.
  if (Val)
    return false;

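  // With no return value to lower, all that is left is to terminate the
  // program with S_ENDPGM.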
  MIRBuilder.buildInstr(AMDGPU::S_ENDPGM);
  return true;
}

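// Compute a pointer into the kernarg segment for the argument at Offset:
// materialize the byte offset as a 64-bit G_CONSTANT and add it to the
// preloaded KERNARG_SEGMENT_PTR with a G_GEP, e.g. (schematically):
//   %off(s64) = G_CONSTANT i64 <Offset>
//   %ptr     = G_GEP %kernarg_segment_ptr, %off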
unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
  unsigned KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  unsigned KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

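// Load one kernel argument of type ParamTy from the kernarg segment at Offset
// into DstReg: build the argument pointer, then emit a G_LOAD through a
// MachineMemOperand marked invariant, since kernarg memory is never written
// while the kernel executes.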
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        unsigned DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MONonTemporal |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

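// Lower the incoming arguments of F into the virtual registers in VRegs.
// Kernels read their arguments from the kernarg segment in memory; the shader
// calling conventions receive theirs preloaded in registers.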
bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                              const Function &F,
                                              ArrayRef<unsigned> VRegs) const {
  // AMDGPU_GS and AMDGPU_HS are not supported yet.
  if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
      F.getCallingConv() == CallingConv::AMDGPU_HS)
    return false;
Tom Stellardca166212017-01-30 21:56:46 +000096
97 MachineFunction &MF = MIRBuilder.getMF();
Tom Stellard5bfbae52018-07-11 20:59:01 +000098 const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
Tom Stellardca166212017-01-30 21:56:46 +000099 MachineRegisterInfo &MRI = MF.getRegInfo();
100 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Tom Stellard5bfbae52018-07-11 20:59:01 +0000101 const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
Tom Stellardca166212017-01-30 21:56:46 +0000102 const DataLayout &DL = F.getParent()->getDataLayout();
103
104 SmallVector<CCValAssign, 16> ArgLocs;
105 CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
106
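  // Reserve the system SGPRs the ABI preloads (private segment buffer,
  // dispatch pointer, queue pointer, kernarg segment pointer, dispatch ID,
  // flat scratch init) before allocating registers for user arguments, so
  // the two cannot collide.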
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    // FIXME: Need to add reg as live-in.
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
    // FIXME: Need to add reg as live-in.
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    unsigned VReg = MRI.createGenericVirtualRegister(P2);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    // FIXME: Need to add reg as live-in.
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    // FIXME: Need to add reg as live-in.
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or
  // argument splitting.
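  //
  // Example (a sketch, assuming BaseOffset is 0): for a kernel taking
  // (i32, i64), the i32 is loaded from offset 0 and the i64, padded out to
  // its 8-byte ABI alignment, from offset 8. Each load's alignment is the
  // known kernarg base alignment (16) clamped by its offset via MinAlign().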
  if (F.getCallingConv() == CallingConv::AMDGPU_KERNEL) {
    unsigned i = 0;
    const unsigned KernArgBaseAlign = 16;
    const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
    uint64_t ExplicitArgOffset = 0;

    // TODO: Align down to dword alignment and extract bits for extending
    // loads.
    for (auto &Arg : F.args()) {
      Type *ArgTy = Arg.getType();
      unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
      if (AllocSize == 0)
        continue;

      unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

      uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
      ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

      unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
      ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
      lowerParameter(MIRBuilder, ArgTy, ArgOffset, Align, VRegs[i]);
      ++i;
    }

    return true;
  }

  unsigned NumArgs = F.arg_size();
  Function::const_arg_iterator CurOrigArg = F.arg_begin();
  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  unsigned PSInputNum = 0;
  BitVector Skipped(NumArgs);
  for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
    EVT ValEVT = TLI.getValueType(DL, CurOrigArg->getType());

    // We can only handle simple value types at the moment.
    ISD::ArgFlagsTy Flags;
    ArgInfo OrigArg{VRegs[i], CurOrigArg->getType()};
    setArgFlags(OrigArg, i + 1, DL, F);
    Flags.setOrigAlign(DL.getABITypeAlignment(CurOrigArg->getType()));

    if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
        !OrigArg.Flags.isInReg() && !OrigArg.Flags.isByVal() &&
        PSInputNum <= 15) {
      if (CurOrigArg->use_empty() && !Info->isPSInputAllocated(PSInputNum)) {
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (!CurOrigArg->use_empty())
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    CCAssignFn *AssignFn = CCAssignFnForCall(F.getCallingConv(),
                                             /*IsVarArg=*/false);

    if (ValEVT.isVector()) {
      EVT ElemVT = ValEVT.getVectorElementType();
      if (!ValEVT.isSimple())
        return false;
      MVT ValVT = ElemVT.getSimpleVT();
      // CCAssignFn returns true when it fails to assign the value.
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full,
                          OrigArg.Flags, CCInfo);
      if (Res)
        return false;
    } else {
      // Check isSimple() before getSimpleVT(), which asserts on extended
      // types.
      if (!ValEVT.isSimple())
        return false;
      MVT ValVT = ValEVT.getSimpleVT();
      bool Res =
          AssignFn(i, ValVT, ValVT, CCValAssign::Full, OrigArg.Flags, CCInfo);

      // Fail if we don't know how to handle this type.
      if (Res)
        return false;
    }
  }

  Function::const_arg_iterator Arg = F.arg_begin();

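  // For the shader calling conventions the arguments were assigned to
  // registers above; emit a live-in and a copy into the corresponding
  // virtual register for each one, skipping the pixel shader inputs that
  // were elided.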
  if (F.getCallingConv() == CallingConv::AMDGPU_VS ||
      F.getCallingConv() == CallingConv::AMDGPU_PS) {
    for (unsigned i = 0, OrigArgIdx = 0;
         OrigArgIdx != NumArgs && i != ArgLocs.size(); ++Arg, ++OrigArgIdx) {
      if (Skipped.test(OrigArgIdx))
        continue;
      CCValAssign &VA = ArgLocs[i++];
      MRI.addLiveIn(VA.getLocReg(), VRegs[OrigArgIdx]);
      MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
      MIRBuilder.buildCopy(VRegs[OrigArgIdx], VA.getLocReg());
    }
    return true;
  }

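  // All other calling conventions are unimplemented for now.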
  return false;
}