//===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the skeleton of the TargetLowering class, specifically
/// invoking the appropriate lowering method for a given instruction kind and
/// driving global register allocation. It also implements the non-deleted
/// instruction iteration in LoweringContext.
///
//===----------------------------------------------------------------------===//

#include "IceTargetLowering.h"

#include "IceAssemblerARM32.h"
#include "IceAssemblerMIPS32.h"
#include "IceAssemblerX8632.h"
#include "IceAssemblerX8664.h"
#include "IceCfg.h" // setError()
#include "IceCfgNode.h"
#include "IceGlobalInits.h"
#include "IceInstVarIter.h"
#include "IceOperand.h"
#include "IceRegAlloc.h"
#include "IceTargetLoweringARM32.h"
#include "IceTargetLoweringMIPS32.h"
#include "IceTargetLoweringX8632.h"
#include "IceTargetLoweringX8664.h"

namespace Ice {

void LoweringContext::init(CfgNode *N) {
  Node = N;
  End = getNode()->getInsts().end();
  rewind();
  advanceForward(Next);
}

void LoweringContext::rewind() {
  Begin = getNode()->getInsts().begin();
  Cur = Begin;
  skipDeleted(Cur);
  Next = Cur;
  availabilityReset();
}

void LoweringContext::insert(Inst *Inst) {
  getNode()->getInsts().insert(Next, Inst);
  LastInserted = Inst;
}

void LoweringContext::skipDeleted(InstList::iterator &I) const {
  while (I != End && I->isDeleted())
    ++I;
}

void LoweringContext::advanceForward(InstList::iterator &I) const {
  if (I != End) {
    ++I;
    skipDeleted(I);
  }
}

Inst *LoweringContext::getLastInserted() const {
  assert(LastInserted);
  return LastInserted;
}

void LoweringContext::availabilityReset() {
  LastDest = nullptr;
  LastSrc = nullptr;
}

void LoweringContext::availabilityUpdate() {
  availabilityReset();
  Inst *Instr = LastInserted;
  if (Instr == nullptr)
    return;
  if (!Instr->isVarAssign())
    return;
  // Since isVarAssign() is true, the source operand must be a Variable.
  LastDest = Instr->getDest();
  LastSrc = llvm::cast<Variable>(Instr->getSrc(0));
}

Variable *LoweringContext::availabilityGet(Operand *Src) const {
  assert(Src);
  if (Src == LastDest)
    return LastSrc;
  return nullptr;
}
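
// A note on the availability mechanism above (explanatory sketch): when the
// most recently inserted instruction is a simple variable assignment such as
// "A = B", availabilityUpdate() records A as LastDest and B as LastSrc. A
// subsequent availabilityGet(A) then returns B, giving the lowering code a
// one-instruction window in which it can use B directly instead of A (a tiny
// form of copy propagation). Inserting anything else clears the window on the
// next availabilityUpdate() call.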

TargetLowering *TargetLowering::createLowering(TargetArch Target, Cfg *Func) {
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return Target##X::create(Func);
#include "llvm/Config/SZTargets.def"

  Func->setError("Unsupported target");
  return nullptr;
}
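
// How the SUBZERO_TARGET expansion works (an illustrative sketch): the
// SZTargets.def file invokes SUBZERO_TARGET(X) once per enabled target, so if
// it lists, say, X8632, the macro above expands to roughly:
//
//   if (Target == Target_X8632)
//     return TargetX8632::create(Func);
//
// The same pattern is reused below for assemblers, data lowering, and header
// lowering, with only the factory call changing.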

void TargetLowering::staticInit(TargetArch Target) {
  // Call the specified target's static initializer.
  switch (Target) {
  default:
    llvm::report_fatal_error("Unsupported target");
    break;
#define SUBZERO_TARGET(X)                                                      \
  case Target_##X: {                                                           \
    static bool InitGuard##X = false;                                          \
    if (InitGuard##X)                                                          \
      return;                                                                  \
    InitGuard##X = true;                                                       \
    Target##X::staticInit();                                                   \
  } break;
#include "llvm/Config/SZTargets.def"
  }
}

TargetLowering::TargetLowering(Cfg *Func)
    : Func(Func), Ctx(Func->getContext()), Context() {}

std::unique_ptr<Assembler> TargetLowering::createAssembler(TargetArch Target,
                                                           Cfg *Func) {
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return std::unique_ptr<Assembler>(new X::Assembler##X());
#include "llvm/Config/SZTargets.def"

  Func->setError("Unsupported target assembler");
  return nullptr;
}

void TargetLowering::doAddressOpt() {
  if (llvm::isa<InstLoad>(*Context.getCur()))
    doAddressOptLoad();
  else if (llvm::isa<InstStore>(*Context.getCur()))
    doAddressOptStore();
  Context.advanceCur();
  Context.advanceNext();
}

void TargetLowering::doNopInsertion(RandomNumberGenerator &RNG) {
  Inst *I = Context.getCur();
  bool ShouldSkip = llvm::isa<InstFakeUse>(I) || llvm::isa<InstFakeDef>(I) ||
                    llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() ||
                    I->isDeleted();
  if (!ShouldSkip) {
    int Probability = Ctx->getFlags().getNopProbabilityAsPercentage();
    for (int I = 0; I < Ctx->getFlags().getMaxNopsPerInstruction(); ++I) {
      randomlyInsertNop(Probability / 100.0, RNG);
    }
  }
}
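
// A worked example of the knobs above (hypothetical flag values, and assuming
// randomlyInsertNop() inserts a single NOP with the given probability): with a
// NOP probability of 50 percent and a maximum of 2 NOPs per instruction, each
// eligible instruction gets two independent draws, so it is followed by 0, 1,
// or 2 NOPs with probabilities 0.25, 0.5, and 0.25. Fake instructions,
// redundant assignments, and deleted instructions are never padded.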

// Lowers a single instruction according to the information in Context, by
// checking the Context.Cur instruction kind and calling the appropriate
// lowering method. The lowering method should insert target instructions at
// the Cur.Next insertion point, and should not delete the Context.Cur
// instruction or advance Context.Cur.
//
// The lowering method may look ahead in the instruction stream as desired, and
// lower additional instructions in conjunction with the current one, for
// example fusing a compare and branch. If it does, it should advance
// Context.Cur to point to the next non-deleted instruction to process, and it
// should delete any additional instructions it consumes.
void TargetLowering::lower() {
  assert(!Context.atEnd());
  Inst *Inst = Context.getCur();
  Inst->deleteIfDead();
  if (!Inst->isDeleted() && !llvm::isa<InstFakeDef>(Inst) &&
      !llvm::isa<InstFakeUse>(Inst)) {
    // Mark the current instruction as deleted before lowering, otherwise the
    // Dest variable will likely get marked as non-SSA. See
    // Variable::setDefinition(). However, just pass-through FakeDef and
    // FakeUse instructions that might have been inserted prior to lowering.
    Inst->setDeleted();
    switch (Inst->getKind()) {
    case Inst::Alloca:
      lowerAlloca(llvm::cast<InstAlloca>(Inst));
      break;
    case Inst::Arithmetic:
      lowerArithmetic(llvm::cast<InstArithmetic>(Inst));
      break;
    case Inst::Assign:
      lowerAssign(llvm::cast<InstAssign>(Inst));
      break;
    case Inst::Br:
      lowerBr(llvm::cast<InstBr>(Inst));
      break;
    case Inst::Call:
      lowerCall(llvm::cast<InstCall>(Inst));
      break;
    case Inst::Cast:
      lowerCast(llvm::cast<InstCast>(Inst));
      break;
    case Inst::ExtractElement:
      lowerExtractElement(llvm::cast<InstExtractElement>(Inst));
      break;
    case Inst::Fcmp:
      lowerFcmp(llvm::cast<InstFcmp>(Inst));
      break;
    case Inst::Icmp:
      lowerIcmp(llvm::cast<InstIcmp>(Inst));
      break;
    case Inst::InsertElement:
      lowerInsertElement(llvm::cast<InstInsertElement>(Inst));
      break;
    case Inst::IntrinsicCall: {
      InstIntrinsicCall *Call = llvm::cast<InstIntrinsicCall>(Inst);
      if (Call->getIntrinsicInfo().ReturnsTwice)
        setCallsReturnsTwice(true);
      lowerIntrinsicCall(Call);
      break;
    }
    case Inst::Load:
      lowerLoad(llvm::cast<InstLoad>(Inst));
      break;
    case Inst::Phi:
      lowerPhi(llvm::cast<InstPhi>(Inst));
      break;
    case Inst::Ret:
      lowerRet(llvm::cast<InstRet>(Inst));
      break;
    case Inst::Select:
      lowerSelect(llvm::cast<InstSelect>(Inst));
      break;
    case Inst::Store:
      lowerStore(llvm::cast<InstStore>(Inst));
      break;
    case Inst::Switch:
      lowerSwitch(llvm::cast<InstSwitch>(Inst));
      break;
    case Inst::Unreachable:
      lowerUnreachable(llvm::cast<InstUnreachable>(Inst));
      break;
    default:
      lowerOther(Inst);
      break;
    }

    postLower();
  }

  Context.advanceCur();
  Context.advanceNext();
}

void TargetLowering::lowerInst(CfgNode *Node, InstList::iterator Next,
                               InstHighLevel *Instr) {
  // TODO(stichnot): Consider modifying the design/implementation to avoid
  // multiple init() calls when using lowerInst() to lower several instructions
  // in the same node.
  Context.init(Node);
  Context.setNext(Next);
  Context.insert(Instr);
  --Next;
  assert(&*Next == Instr);
  Context.setCur(Next);
  lower();
}

void TargetLowering::lowerOther(const Inst *Instr) {
  (void)Instr;
  Func->setError("Can't lower unsupported instruction type");
}

// Drives register allocation, allowing all physical registers (except perhaps
// for the frame pointer) to be allocated. This set of registers could
// potentially be parameterized if we want to restrict registers e.g. for
// performance testing.
void TargetLowering::regAlloc(RegAllocKind Kind) {
  TimerMarker T(TimerStack::TT_regAlloc, Func);
  LinearScan LinearScan(Func);
  RegSetMask RegInclude = RegSet_None;
  RegSetMask RegExclude = RegSet_None;
  RegInclude |= RegSet_CallerSave;
  RegInclude |= RegSet_CalleeSave;
  if (hasFramePointer())
    RegExclude |= RegSet_FramePointer;
  llvm::SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude);
  bool Repeat = (Kind == RAK_Global && Ctx->getFlags().shouldRepeatRegAlloc());
  do {
    LinearScan.init(Kind);
    LinearScan.scan(RegMask, Ctx->getFlags().shouldRandomizeRegAlloc());
    if (!LinearScan.hasEvictions())
      Repeat = false;
    Kind = RAK_SecondChance;
  } while (Repeat);
  // TODO(stichnot): Run the register allocator one more time to do stack slot
  // coalescing. The idea would be to initialize the Unhandled list with the
  // set of Variables that have no register and a non-empty live range, and
  // model an infinite number of registers. Maybe use the register aliasing
  // mechanism to get better packing of narrower slots.
}
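
// A rough reading of the register-set plumbing above (explanatory note):
// RegInclude requests both caller-save and callee-save registers, so by
// default every allocatable physical register is offered to linear scan. When
// the target reserves a frame pointer (e.g. ebp on x86-32), RegExclude removes
// it, and getRegisterSet() folds the two masks into the RegMask bit vector
// that is handed to LinearScan::scan() on every (possibly repeated) round.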

void TargetLowering::markRedefinitions() {
  // Find (non-SSA) instructions where the Dest variable appears in some source
  // operand, and set the IsDestRedefined flag to keep liveness analysis
  // consistent.
  for (auto Inst = Context.getCur(), E = Context.getNext(); Inst != E; ++Inst) {
    if (Inst->isDeleted())
      continue;
    Variable *Dest = Inst->getDest();
    if (Dest == nullptr)
      continue;
    FOREACH_VAR_IN_INST(Var, *Inst) {
      if (Var == Dest) {
        Inst->setDestRedefined();
        break;
      }
    }
  }
}
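
// Example of a redefinition (illustrative only): if lowering produces a
// two-address style instruction such as "t = add t, 1", the destination t also
// appears as a source operand. Marking it with setDestRedefined() tells
// liveness analysis that this definition does not begin a fresh live range for
// t, which keeps the (non-SSA) liveness information consistent.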

void TargetLowering::sortVarsByAlignment(VarList &Dest,
                                         const VarList &Source) const {
  Dest = Source;
  // Instead of std::sort, we could do a bucket sort with log2(alignment) as
  // the buckets, if performance is an issue.
  std::sort(Dest.begin(), Dest.end(),
            [this](const Variable *V1, const Variable *V2) {
              return typeWidthInBytesOnStack(V1->getType()) >
                     typeWidthInBytesOnStack(V2->getType());
            });
}
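
// For instance (illustrative only): spilled variables occupying 16, 4, 8, and
// 1 bytes on the stack come out ordered as 16, 8, 4, 1. Laying out the widest
// (most aligned) slots first means later, narrower slots never force extra
// padding in the middle of the spill area.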

void TargetLowering::getVarStackSlotParams(
    VarList &SortedSpilledVariables, llvm::SmallBitVector &RegsUsed,
    size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
    uint32_t *SpillAreaAlignmentBytes, uint32_t *LocalsSlotsAlignmentBytes,
    std::function<bool(Variable *)> TargetVarHook) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  llvm::BitVector IsVarReferenced(Func->getNumVariables());
  for (CfgNode *Node : Func->getNodes()) {
    for (Inst &Inst : Node->getInsts()) {
      if (Inst.isDeleted())
        continue;
      if (const Variable *Var = Inst.getDest())
        IsVarReferenced[Var->getIndex()] = true;
      FOREACH_VAR_IN_INST(Var, Inst) {
        IsVarReferenced[Var->getIndex()] = true;
      }
    }
  }

  // If SimpleCoalescing is false, each variable without a register gets its
  // own unique stack slot, which leads to large stack frames. If
  // SimpleCoalescing is true, then each "global" variable without a register
  // gets its own slot, but "local" variable slots are reused across basic
  // blocks. E.g., if A and B are local to block 1 and C is local to block 2,
  // then C may share a slot with A or B.
  //
  // We cannot coalesce stack slots if this function calls a "returns twice"
  // function. In that case, basic blocks may be revisited, and variables local
  // to those basic blocks are actually live until after the called function
  // returns a second time.
  const bool SimpleCoalescing = !callsReturnsTwice();

  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const VarList &Variables = Func->getVariables();
  VarList SpilledVariables;
  for (Variable *Var : Variables) {
    if (Var->hasReg()) {
      RegsUsed[Var->getRegNum()] = true;
      continue;
    }
    // An argument either does not need a stack slot (if passed in a register)
    // or already has one (if passed on the stack).
    if (Var->getIsArg())
      continue;
    // An unreferenced variable doesn't need a stack slot.
    if (!IsVarReferenced[Var->getIndex()])
      continue;
    // A target-specific variable (as recognized by TargetVarHook) may end up
    // sharing a stack slot, and so does not need accounting here.
    if (TargetVarHook(Var))
      continue;
    SpilledVariables.push_back(Var);
  }

  SortedSpilledVariables.reserve(SpilledVariables.size());
  sortVarsByAlignment(SortedSpilledVariables, SpilledVariables);

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    // We have sorted by alignment, so the first variable we encounter that is
    // located in each area determines the max alignment for the area.
    if (!*SpillAreaAlignmentBytes)
      *SpillAreaAlignmentBytes = Increment;
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        *GlobalsSize += Increment;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        if (LocalsSize[NodeIndex] > *SpillAreaSizeBytes)
          *SpillAreaSizeBytes = LocalsSize[NodeIndex];
        if (!*LocalsSlotsAlignmentBytes)
          *LocalsSlotsAlignmentBytes = Increment;
      }
    } else {
      *SpillAreaSizeBytes += Increment;
    }
  }
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding.
  *SpillAreaSizeBytes += Ctx->getFlags().getTestStackExtra();
}
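
// A small worked example of the sizing above (hypothetical variables): with
// SimpleCoalescing enabled, suppose block 1 has single-block locals totaling 8
// bytes, block 2 has single-block locals totaling 12 bytes, and two
// multi-block variables take 4 bytes each. Then GlobalsSize ends up as 8, and
// SpillAreaSizeBytes (before the test-stack padding) ends up as 12, the
// largest per-block locals footprint, because per-block slots are reused
// across blocks.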

void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset,
                                          uint32_t SpillAreaAlignmentBytes,
                                          size_t GlobalsSize,
                                          uint32_t LocalsSlotsAlignmentBytes,
                                          uint32_t *SpillAreaPaddingBytes,
                                          uint32_t *LocalsSlotsPaddingBytes) {
  if (SpillAreaAlignmentBytes) {
    uint32_t PaddingStart = SpillAreaStartOffset;
    uint32_t SpillAreaStart =
        Utils::applyAlignment(PaddingStart, SpillAreaAlignmentBytes);
    *SpillAreaPaddingBytes = SpillAreaStart - PaddingStart;
  }

  // If there are separate globals and locals areas, make sure the locals area
  // is aligned by padding the end of the globals area.
  if (LocalsSlotsAlignmentBytes) {
    uint32_t GlobalsAndSubsequentPaddingSize = GlobalsSize;
    GlobalsAndSubsequentPaddingSize =
        Utils::applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
    *LocalsSlotsPaddingBytes = GlobalsAndSubsequentPaddingSize - GlobalsSize;
  }
}
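
// Worked numbers for the alignment above (hypothetical, and assuming
// Utils::applyAlignment() rounds its first argument up to the given multiple):
// with SpillAreaStartOffset = 12 and SpillAreaAlignmentBytes = 16, the spill
// area starts at 16 and SpillAreaPaddingBytes is 4. Likewise, GlobalsSize = 20
// with LocalsSlotsAlignmentBytes = 8 rounds the globals area up to 24, so
// LocalsSlotsPaddingBytes is 4.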

void TargetLowering::assignVarStackSlots(VarList &SortedSpilledVariables,
                                         size_t SpillAreaPaddingBytes,
                                         size_t SpillAreaSizeBytes,
                                         size_t GlobalsAndSubsequentPaddingSize,
                                         bool UsesFramePointer) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding. This assumes that
  // SpillAreaSizeBytes has accounted for the extra test padding. When
  // UsesFramePointer is true, the offset depends on the padding, not just the
  // SpillAreaSizeBytes. On the other hand, when UsesFramePointer is false, the
  // offsets depend on the gap between SpillAreaSizeBytes and
  // SpillAreaPaddingBytes, so we don't increment that.
  size_t TestPadding = Ctx->getFlags().getTestStackExtra();
  if (UsesFramePointer)
    SpillAreaPaddingBytes += TestPadding;
  size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
  size_t NextStackOffset = SpillAreaPaddingBytes;
  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const bool SimpleCoalescing = !callsReturnsTwice();

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        GlobalsSpaceUsed += Increment;
        NextStackOffset = GlobalsSpaceUsed;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        NextStackOffset = SpillAreaPaddingBytes +
                          GlobalsAndSubsequentPaddingSize +
                          LocalsSize[NodeIndex];
      }
    } else {
      NextStackOffset += Increment;
    }
    if (UsesFramePointer)
      Var->setStackOffset(-NextStackOffset);
    else
      Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
  }
}
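
// Sign convention above, with made-up numbers: when a frame pointer is used,
// offsets are negative relative to it, so a variable whose NextStackOffset is
// 8 gets stack offset -8. Without a frame pointer, offsets are measured upward
// from the stack pointer at the bottom of the spill area, so with
// SpillAreaSizeBytes = 32 the same variable gets offset 32 - 8 = 24.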

InstCall *TargetLowering::makeHelperCall(const IceString &Name, Variable *Dest,
                                         SizeT MaxSrcs) {
  constexpr bool HasTailCall = false;
  Constant *CallTarget = Ctx->getConstantExternSym(Name);
  InstCall *Call =
      InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
  return Call;
}
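
// Usage sketch (hypothetical helper name and operand count): a target's
// lowering code might build a runtime-helper call roughly as
//   InstCall *Call = makeHelperCall("__example_helper", Dest, 2);
// then attach up to two argument operands and hand it to lowerCall(). The
// callee is referenced as an external symbol, and helper calls are never
// created as tail calls.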

bool TargetLowering::shouldOptimizeMemIntrins() {
  return Ctx->getFlags().getOptLevel() >= Opt_1 ||
         Ctx->getFlags().getForceMemIntrinOpt();
}

void TargetLowering::emitWithoutPrefix(const ConstantRelocatable *C) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  if (C->getSuppressMangling())
    Str << C->getName();
  else
    Str << Ctx->mangleName(C->getName());
  RelocOffsetT Offset = C->getOffset();
  if (Offset) {
    if (Offset > 0)
      Str << "+";
    Str << Offset;
  }
}
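
// For example (illustrative output only): a relocatable named "foo" with
// offset 8 is emitted as "foo+8" (or with the mangled name if mangling is not
// suppressed), while a negative offset such as -4 comes out as "foo-4" because
// the sign is already part of the printed integer.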

void TargetLowering::emit(const ConstantRelocatable *C) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  Str << getConstantPrefix();
  emitWithoutPrefix(C);
}

std::unique_ptr<TargetDataLowering>
TargetDataLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = Ctx->getFlags().getTargetArch();
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return TargetData##X::create(Ctx);
#include "llvm/Config/SZTargets.def"

  llvm::report_fatal_error("Unsupported target data lowering");
}

TargetDataLowering::~TargetDataLowering() = default;

namespace {

// dataSectionSuffix decides whether to use SectionSuffix or MangledVarName as
// the data section suffix. Essentially, when using separate data sections for
// globals, SectionSuffix is not necessary.
IceString dataSectionSuffix(const IceString &SectionSuffix,
                            const IceString &MangledVarName,
                            const bool DataSections) {
  if (SectionSuffix.empty() && !DataSections) {
    return "";
  }

  if (DataSections) {
    // With data sections we don't need to use the SectionSuffix.
    return "." + MangledVarName;
  }

  assert(!SectionSuffix.empty());
  return "." + SectionSuffix;
}
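
// Illustrative results (hypothetical inputs): dataSectionSuffix("", "myvar",
// /*DataSections=*/true) yields ".myvar", dataSectionSuffix("mysect", "myvar",
// false) yields ".mysect", and dataSectionSuffix("", "myvar", false) yields
// the empty string.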

} // end of anonymous namespace

void TargetDataLowering::emitGlobal(const VariableDeclaration &Var,
                                    const IceString &SectionSuffix) {
  if (!BuildDefs::dump())
    return;

  // If external and not initialized, this must be a cross test. Don't generate
  // a declaration for such cases.
  const bool IsExternal =
      Var.isExternal() || Ctx->getFlags().getDisableInternal();
  if (IsExternal && !Var.hasInitializer())
    return;

  Ostream &Str = Ctx->getStrEmit();
  const bool HasNonzeroInitializer = Var.hasNonzeroInitializer();
  const bool IsConstant = Var.getIsConstant();
  const SizeT Size = Var.getNumBytes();
  const IceString MangledName = Var.mangleName(Ctx);

  Str << "\t.type\t" << MangledName << ",%object\n";

  const bool UseDataSections = Ctx->getFlags().getDataSections();
  const IceString Suffix =
      dataSectionSuffix(SectionSuffix, MangledName, UseDataSections);
  if (IsConstant)
    Str << "\t.section\t.rodata" << Suffix << ",\"a\",%progbits\n";
  else if (HasNonzeroInitializer)
    Str << "\t.section\t.data" << Suffix << ",\"aw\",%progbits\n";
  else
    Str << "\t.section\t.bss" << Suffix << ",\"aw\",%nobits\n";

  if (IsExternal)
    Str << "\t.globl\t" << MangledName << "\n";

  const uint32_t Align = Var.getAlignment();
  if (Align > 1) {
    assert(llvm::isPowerOf2_32(Align));
    // Use the .p2align directive, since the .align N directive can either
    // interpret N as bytes, or power of 2 bytes, depending on the target.
    Str << "\t.p2align\t" << llvm::Log2_32(Align) << "\n";
  }

  Str << MangledName << ":\n";

  if (HasNonzeroInitializer) {
    for (const std::unique_ptr<VariableDeclaration::Initializer> &Init :
         Var.getInitializers()) {
      switch (Init->getKind()) {
      case VariableDeclaration::Initializer::DataInitializerKind: {
        const auto &Data =
            llvm::cast<VariableDeclaration::DataInitializer>(Init.get())
                ->getContents();
        for (SizeT i = 0; i < Init->getNumBytes(); ++i) {
          Str << "\t.byte\t" << (((unsigned)Data[i]) & 0xff) << "\n";
        }
        break;
      }
      case VariableDeclaration::Initializer::ZeroInitializerKind:
        Str << "\t.zero\t" << Init->getNumBytes() << "\n";
        break;
      case VariableDeclaration::Initializer::RelocInitializerKind: {
        const auto *Reloc =
            llvm::cast<VariableDeclaration::RelocInitializer>(Init.get());
        Str << "\t" << getEmit32Directive() << "\t";
        Str << Reloc->getDeclaration()->mangleName(Ctx);
        if (RelocOffsetT Offset = Reloc->getOffset()) {
          if (Offset >= 0 || (Offset == INT32_MIN))
            Str << " + " << Offset;
          else
            Str << " - " << -Offset;
        }
        Str << "\n";
        break;
      }
      }
    }
  } else {
    // NOTE: for non-constant zero initializers, this is BSS (no bits), so an
    // ELF writer would not write to the file, and only track virtual offsets,
    // but the .s writer still needs this .zero and cannot simply use the .size
    // to advance offsets.
    Str << "\t.zero\t" << Size << "\n";
  }

  Str << "\t.size\t" << MangledName << ", " << Size << "\n";
}
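
// Sketch of the assembly produced above for a small zero-initialized,
// internal, non-constant global (hypothetical name and size, assuming data
// sections are disabled and alignment of 1):
//
//   .type   g,%object
//   .section        .bss,"aw",%nobits
// g:
//   .zero   4
//   .size   g, 4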

std::unique_ptr<TargetHeaderLowering>
TargetHeaderLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = Ctx->getFlags().getTargetArch();
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return TargetHeader##X::create(Ctx);
#include "llvm/Config/SZTargets.def"

  llvm::report_fatal_error("Unsupported target header lowering");
}

TargetHeaderLowering::~TargetHeaderLowering() = default;

} // end of namespace Ice