//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
/// SI-specific per-function state: which preloaded SGPR inputs the function
/// requires, and how SGPR spills are mapped onto VGPR lanes.
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

// Wavefront size: each lane VGPR used for SGPR spilling provides MAX_LANES
// 32-bit spill slots, one per lane.
#define MAX_LANES 64

using namespace llvm;

// Pin the vtable to this file.
void SIMachineFunctionInfo::anchor() {}

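// Record which preloaded SGPR inputs this function requires, based on its
// calling convention, its function attributes, and whether it has stack
// objects or may spill.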
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    ReturnsVoid(true),
    MaximumWorkGroupSize(0),
    LDSWaveSpillSize(0),
    PSInputEna(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    HasFlatInstructions(false),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    DispatchID(false),
    KernargSegmentPtr(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false) {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  const Function *F = MF.getFunction();

  PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

  // Compute kernels (anything that is not a graphics shader) always need the
  // kernarg segment pointer and the workgroup/workitem X IDs.
  if (!AMDGPU::isShader(F->getCallingConv())) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  }

  if (F->hasFnAttribute("amdgpu-work-group-id-y"))
    WorkGroupIDY = true;

  if (F->hasFnAttribute("amdgpu-work-group-id-z"))
    WorkGroupIDZ = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-y"))
    WorkItemIDY = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-z"))
    WorkItemIDZ = true;

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;

  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo->hasStackObjects();

  if (HasStackObjects || MaySpill)
    PrivateSegmentWaveByteOffset = true;

  if (ST.isAmdHsaOS()) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;
  }

  // We don't need to worry about accessing spills with flat instructions.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS &&
      ST.isAmdHsaOS())
    FlatScratchInit = true;

  if (AMDGPU::isCompute(F->getCallingConv()))
    MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
  else
    MaximumWorkGroupSize = ST.getWavefrontSize();
}

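// Each of the following add* methods reserves the next unallocated user SGPRs
// for one preloaded input, records the assigned register, and returns it.
// Because allocation is sequential, the order in which these are called
// determines the user SGPR layout.

// Reserve four consecutive user SGPRs (an SReg_128 tuple) for the resource
// descriptor of the private segment (scratch) buffer.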
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

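// Reserve an SGPR pair (SReg_64) for the 64-bit pointer to the dispatch
// packet.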
unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

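// Reserve an SGPR pair (SReg_64) for the 64-bit queue pointer.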
unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

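// Reserve an SGPR pair (SReg_64) for the pointer to the kernel argument
// segment.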
unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

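// Reserve an SGPR pair (SReg_64) for the value used to initialize flat
// scratch access.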
unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

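// Map an SGPR spill slot (a frame index plus a 32-bit sub-register index) to
// a lane of a VGPR reserved for spilling, allocating a new lane VGPR each
// time the byte offset crosses into the next group of MAX_LANES slots. For
// example, byte offset 260 maps to lane VGPR index 260 / 256 = 1 and lane
// (260 / 4) % 64 = 1. If no unused VGPR remains, the spill is returned
// without a VGPR assigned.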
SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
                                                         MachineFunction *MF,
                                                         unsigned FrameIndex,
                                                         unsigned SubIdx) {
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

  unsigned LaneVGPRIdx = Offset / (MAX_LANES * 4);
  unsigned Lane = (Offset / 4) % MAX_LANES;

  SpilledReg Spill;
  Spill.Lane = Lane;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    if (LaneVGPR == AMDGPU::NoRegister) {
      // We have no VGPRs left for spilling SGPRs.
      return Spill;
    }

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid machine verifier
    // complaining about use of an undefined physical register.
    for (MachineBasicBlock &BB : *MF)
      BB.addLiveIn(LaneVGPR);
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}

// MF is unused here; the limit is computed once in the constructor.
unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
  const MachineFunction &MF) const {
  return MaximumWorkGroupSize;
}