//===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the skeleton of the TargetLowering class,
/// specifically invoking the appropriate lowering method for a given
/// instruction kind and driving global register allocation. It also
/// implements the non-deleted instruction iteration in
/// LoweringContext.
///
//===----------------------------------------------------------------------===//

#include "IceTargetLowering.h"

#include "IceAssemblerARM32.h"
#include "IceAssemblerMIPS32.h"
#include "IceAssemblerX8632.h"
#include "IceAssemblerX8664.h"
#include "IceCfg.h" // setError()
#include "IceCfgNode.h"
#include "IceGlobalInits.h"
#include "IceOperand.h"
#include "IceRegAlloc.h"
#include "IceTargetLoweringARM32.h"
#include "IceTargetLoweringMIPS32.h"
#include "IceTargetLoweringX8632.h"
#include "IceTargetLoweringX8664.h"

namespace Ice {

void LoweringContext::init(CfgNode *N) {
  Node = N;
  End = getNode()->getInsts().end();
  rewind();
  advanceForward(Next);
}

void LoweringContext::rewind() {
  Begin = getNode()->getInsts().begin();
  Cur = Begin;
  skipDeleted(Cur);
  Next = Cur;
}

void LoweringContext::insert(Inst *Inst) {
  getNode()->getInsts().insert(Next, Inst);
  LastInserted = Inst;
}

void LoweringContext::skipDeleted(InstList::iterator &I) const {
  while (I != End && I->isDeleted())
    ++I;
}

void LoweringContext::advanceForward(InstList::iterator &I) const {
  if (I != End) {
    ++I;
    skipDeleted(I);
  }
}

Inst *LoweringContext::getLastInserted() const {
  assert(LastInserted);
  return LastInserted;
}
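
// Typical driver-loop use of LoweringContext (an illustrative sketch only,
// assuming the caller owns a TargetLowering whose Context member is used):
//   Context.init(Node);
//   while (!Context.atEnd())
//     lower(); // lowers Context.getCur(), then advances Cur and Next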

TargetLowering *TargetLowering::createLowering(TargetArch Target, Cfg *Func) {
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return Target##X::create(Func);
#include "llvm/Config/SZTargets.def"
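  // Illustrative note: SZTargets.def invokes SUBZERO_TARGET(X) once per
  // enabled target, so, e.g., SUBZERO_TARGET(X8632) expands the macro above
  // to:
  //   if (Target == Target_X8632)
  //     return TargetX8632::create(Func);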

  Func->setError("Unsupported target");
  return nullptr;
}

TargetLowering::TargetLowering(Cfg *Func)
    : Func(Func), Ctx(Func->getContext()), Context() {}

std::unique_ptr<Assembler> TargetLowering::createAssembler(TargetArch Target,
                                                           Cfg *Func) {
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return std::unique_ptr<Assembler>(new X::Assembler##X());
#include "llvm/Config/SZTargets.def"

  Func->setError("Unsupported target assembler");
  return nullptr;
}

void TargetLowering::doAddressOpt() {
  if (llvm::isa<InstLoad>(*Context.getCur()))
    doAddressOptLoad();
  else if (llvm::isa<InstStore>(*Context.getCur()))
    doAddressOptStore();
  Context.advanceCur();
  Context.advanceNext();
}

void TargetLowering::doNopInsertion() {
  Inst *I = Context.getCur();
  bool ShouldSkip = llvm::isa<InstFakeUse>(I) || llvm::isa<InstFakeDef>(I) ||
                    llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() ||
                    I->isDeleted();
  if (!ShouldSkip) {
    int Probability = Ctx->getFlags().getNopProbabilityAsPercentage();
    for (int I = 0; I < Ctx->getFlags().getMaxNopsPerInstruction(); ++I) {
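      // Scale the percentage down to a [0.0, 1.0] probability for each
      // potential nop insertion.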
      randomlyInsertNop(Probability / 100.0);
    }
  }
}

// Lowers a single instruction according to the information in
// Context, by checking the Context.Cur instruction kind and calling
// the appropriate lowering method. The lowering method should insert
// target instructions at the Cur.Next insertion point, and should not
// delete the Context.Cur instruction or advance Context.Cur.
//
// The lowering method may look ahead in the instruction stream as
// desired, and lower additional instructions in conjunction with the
// current one, for example fusing a compare and branch. If it does,
// it should advance Context.Cur to point to the next non-deleted
// instruction to process, and it should delete any additional
// instructions it consumes.
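//
// A hypothetical sketch of such a fused lowering (TargetFoo and getNextInst
// are placeholder names for illustration, not actual Subzero APIs):
//   void TargetFoo::lowerIcmp(const InstIcmp *Icmp) {
//     if (auto *NextBr = llvm::dyn_cast<InstBr>(Context.getNextInst())) {
//       // ... emit a single compare-and-branch instruction ...
//       NextBr->setDeleted();  // delete the consumed instruction
//       Context.advanceCur();  // skip past it
//     }
//   }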
void TargetLowering::lower() {
  assert(!Context.atEnd());
  Inst *Inst = Context.getCur();
  Inst->deleteIfDead();
  if (!Inst->isDeleted() && !llvm::isa<InstFakeDef>(Inst) &&
      !llvm::isa<InstFakeUse>(Inst)) {
    // Mark the current instruction as deleted before lowering,
    // otherwise the Dest variable will likely get marked as non-SSA.
    // See Variable::setDefinition(). However, just pass through
    // FakeDef and FakeUse instructions that might have been inserted
    // prior to lowering.
    Inst->setDeleted();
    switch (Inst->getKind()) {
    case Inst::Alloca:
      lowerAlloca(llvm::cast<InstAlloca>(Inst));
      break;
    case Inst::Arithmetic:
      lowerArithmetic(llvm::cast<InstArithmetic>(Inst));
      break;
    case Inst::Assign:
      lowerAssign(llvm::cast<InstAssign>(Inst));
      break;
    case Inst::Br:
      lowerBr(llvm::cast<InstBr>(Inst));
      break;
    case Inst::Call:
      lowerCall(llvm::cast<InstCall>(Inst));
      break;
    case Inst::Cast:
      lowerCast(llvm::cast<InstCast>(Inst));
      break;
    case Inst::ExtractElement:
      lowerExtractElement(llvm::cast<InstExtractElement>(Inst));
      break;
    case Inst::Fcmp:
      lowerFcmp(llvm::cast<InstFcmp>(Inst));
      break;
    case Inst::Icmp:
      lowerIcmp(llvm::cast<InstIcmp>(Inst));
      break;
    case Inst::InsertElement:
      lowerInsertElement(llvm::cast<InstInsertElement>(Inst));
      break;
    case Inst::IntrinsicCall: {
      InstIntrinsicCall *Call = llvm::cast<InstIntrinsicCall>(Inst);
      if (Call->getIntrinsicInfo().ReturnsTwice)
        setCallsReturnsTwice(true);
      lowerIntrinsicCall(Call);
      break;
    }
    case Inst::Load:
      lowerLoad(llvm::cast<InstLoad>(Inst));
      break;
    case Inst::Phi:
      lowerPhi(llvm::cast<InstPhi>(Inst));
      break;
    case Inst::Ret:
      lowerRet(llvm::cast<InstRet>(Inst));
      break;
    case Inst::Select:
      lowerSelect(llvm::cast<InstSelect>(Inst));
      break;
    case Inst::Store:
      lowerStore(llvm::cast<InstStore>(Inst));
      break;
    case Inst::Switch:
      lowerSwitch(llvm::cast<InstSwitch>(Inst));
      break;
    case Inst::Unreachable:
      lowerUnreachable(llvm::cast<InstUnreachable>(Inst));
      break;
    default:
      lowerOther(Inst);
      break;
    }

    postLower();
  }

  Context.advanceCur();
  Context.advanceNext();
}

void TargetLowering::lowerInst(CfgNode *Node, InstList::iterator Next,
                               InstHighLevel *Instr) {
  // TODO(stichnot): Consider modifying the design/implementation to avoid
  // multiple init() calls when using lowerInst() to lower several instructions
  // in the same node.
  Context.init(Node);
  Context.setNext(Next);
  Context.insert(Instr);
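  // Back Next up so it points at the just-inserted instruction, and make that
  // the current instruction to lower.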
  --Next;
  assert(&*Next == Instr);
  Context.setCur(Next);
  lower();
}

void TargetLowering::lowerOther(const Inst *Instr) {
  (void)Instr;
  Func->setError("Can't lower unsupported instruction type");
}

// Drives register allocation, allowing all physical registers (except
// perhaps for the frame pointer) to be allocated. This set of
// registers could potentially be parameterized if we want to restrict
// registers e.g. for performance testing.
void TargetLowering::regAlloc(RegAllocKind Kind) {
  TimerMarker T(TimerStack::TT_regAlloc, Func);
  LinearScan LinearScan(Func);
  RegSetMask RegInclude = RegSet_None;
  RegSetMask RegExclude = RegSet_None;
  RegInclude |= RegSet_CallerSave;
  RegInclude |= RegSet_CalleeSave;
  if (hasFramePointer())
    RegExclude |= RegSet_FramePointer;
  LinearScan.init(Kind);
  llvm::SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude);
  LinearScan.scan(RegMask, Ctx->getFlags().shouldRandomizeRegAlloc());
}

void TargetLowering::inferTwoAddress() {
  // Find two-address non-SSA instructions where Dest==Src0, and set
  // the DestNonKillable flag to keep liveness analysis consistent.
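  // E.g., on x86, "A = A + B" is lowered to the two-address form "add A, B",
  // in which the Dest and Src(0) operands are both A.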
  for (auto Inst = Context.getCur(), E = Context.getNext(); Inst != E; ++Inst) {
    if (Inst->isDeleted())
      continue;
    if (Variable *Dest = Inst->getDest()) {
      // TODO(stichnot): We may need to consider all source
      // operands, not just the first one, if using 3-address
      // instructions.
      if (Inst->getSrcSize() > 0 && Inst->getSrc(0) == Dest)
        Inst->setDestNonKillable();
    }
  }
}

void TargetLowering::sortVarsByAlignment(VarList &Dest,
                                         const VarList &Source) const {
  Dest = Source;
  // Instead of std::sort, we could do a bucket sort with log2(alignment)
  // as the buckets, if performance is an issue.
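  // The comparator below orders by decreasing stack width, so, e.g., 8-byte
  // slots come before 4-byte slots.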
  std::sort(Dest.begin(), Dest.end(),
            [this](const Variable *V1, const Variable *V2) {
              return typeWidthInBytesOnStack(V1->getType()) >
                     typeWidthInBytesOnStack(V2->getType());
            });
}

void TargetLowering::getVarStackSlotParams(
    VarList &SortedSpilledVariables, llvm::SmallBitVector &RegsUsed,
    size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
    uint32_t *SpillAreaAlignmentBytes, uint32_t *LocalsSlotsAlignmentBytes,
    std::function<bool(Variable *)> TargetVarHook) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  llvm::BitVector IsVarReferenced(Func->getNumVariables());
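  // First pass: mark every variable that appears as the dest or a source of
  // any non-deleted instruction; only referenced variables need stack slots.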
  for (CfgNode *Node : Func->getNodes()) {
    for (Inst &Inst : Node->getInsts()) {
      if (Inst.isDeleted())
        continue;
      if (const Variable *Var = Inst.getDest())
        IsVarReferenced[Var->getIndex()] = true;
      for (SizeT I = 0; I < Inst.getSrcSize(); ++I) {
        Operand *Src = Inst.getSrc(I);
        SizeT NumVars = Src->getNumVars();
        for (SizeT J = 0; J < NumVars; ++J) {
          const Variable *Var = Src->getVar(J);
          IsVarReferenced[Var->getIndex()] = true;
        }
      }
    }
  }

  // If SimpleCoalescing is false, each variable without a register
  // gets its own unique stack slot, which leads to large stack
  // frames. If SimpleCoalescing is true, then each "global" variable
  // without a register gets its own slot, but "local" variable slots
  // are reused across basic blocks. E.g., if A and B are local to
  // block 1 and C is local to block 2, then C may share a slot with A or B.
  //
  // We cannot coalesce stack slots if this function calls a "returns twice"
  // function. In that case, basic blocks may be revisited, and variables
  // local to those basic blocks are actually live until after the
  // called function returns a second time.
  const bool SimpleCoalescing = !callsReturnsTwice();

  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const VarList &Variables = Func->getVariables();
  VarList SpilledVariables;
  for (Variable *Var : Variables) {
    if (Var->hasReg()) {
      RegsUsed[Var->getRegNum()] = true;
      continue;
    }
    // An argument either does not need a stack slot (if passed in a
    // register) or already has one (if passed on the stack).
    if (Var->getIsArg())
      continue;
    // An unreferenced variable doesn't need a stack slot.
    if (!IsVarReferenced[Var->getIndex()])
      continue;
    // Check for a target-specific variable: it may end up sharing stack
    // slots, in which case it needs no accounting here.
    if (TargetVarHook(Var))
      continue;
    SpilledVariables.push_back(Var);
  }

  SortedSpilledVariables.reserve(SpilledVariables.size());
  sortVarsByAlignment(SortedSpilledVariables, SpilledVariables);

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    // We have sorted by alignment, so the first variable we encounter that
    // is located in each area determines the max alignment for the area.
    if (!*SpillAreaAlignmentBytes)
      *SpillAreaAlignmentBytes = Increment;
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        *GlobalsSize += Increment;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        if (LocalsSize[NodeIndex] > *SpillAreaSizeBytes)
          *SpillAreaSizeBytes = LocalsSize[NodeIndex];
        if (!*LocalsSlotsAlignmentBytes)
          *LocalsSlotsAlignmentBytes = Increment;
      }
    } else {
      *SpillAreaSizeBytes += Increment;
    }
  }
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding.
  *SpillAreaSizeBytes += Ctx->getFlags().getTestStackExtra();
}

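// Worked example (assuming Utils::applyAlignment rounds an offset up to the
// requested alignment): with SpillAreaStartOffset == 4 and
// SpillAreaAlignmentBytes == 16, SpillAreaStart becomes 16, leaving
// *SpillAreaPaddingBytes == 12.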
void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset,
                                          uint32_t SpillAreaAlignmentBytes,
                                          size_t GlobalsSize,
                                          uint32_t LocalsSlotsAlignmentBytes,
                                          uint32_t *SpillAreaPaddingBytes,
                                          uint32_t *LocalsSlotsPaddingBytes) {
  if (SpillAreaAlignmentBytes) {
    uint32_t PaddingStart = SpillAreaStartOffset;
    uint32_t SpillAreaStart =
        Utils::applyAlignment(PaddingStart, SpillAreaAlignmentBytes);
    *SpillAreaPaddingBytes = SpillAreaStart - PaddingStart;
  }

  // If there are separate globals and locals areas, make sure the
  // locals area is aligned by padding the end of the globals area.
  if (LocalsSlotsAlignmentBytes) {
    uint32_t GlobalsAndSubsequentPaddingSize = GlobalsSize;
    GlobalsAndSubsequentPaddingSize =
        Utils::applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
    *LocalsSlotsPaddingBytes = GlobalsAndSubsequentPaddingSize - GlobalsSize;
  }
}

void TargetLowering::assignVarStackSlots(VarList &SortedSpilledVariables,
                                         size_t SpillAreaPaddingBytes,
                                         size_t SpillAreaSizeBytes,
                                         size_t GlobalsAndSubsequentPaddingSize,
                                         bool UsesFramePointer) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  // For testing legalization of large stack offsets on targets with limited
  // offset bits in instruction encodings, add some padding. This assumes that
  // SpillAreaSizeBytes has accounted for the extra test padding.
  // When UseFramePointer is true, the offset depends on the padding,
  // not just the SpillAreaSizeBytes. On the other hand, when UseFramePointer
  // is false, the offsets depend on the gap between SpillAreaSizeBytes
  // and SpillAreaPaddingBytes, so we don't increment that.
  size_t TestPadding = Ctx->getFlags().getTestStackExtra();
  if (UsesFramePointer)
    SpillAreaPaddingBytes += TestPadding;
  size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
  size_t NextStackOffset = SpillAreaPaddingBytes;
  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const bool SimpleCoalescing = !callsReturnsTwice();

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        GlobalsSpaceUsed += Increment;
        NextStackOffset = GlobalsSpaceUsed;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        NextStackOffset = SpillAreaPaddingBytes +
                          GlobalsAndSubsequentPaddingSize +
                          LocalsSize[NodeIndex];
      }
    } else {
      NextStackOffset += Increment;
    }
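    // With a frame pointer, slots are addressed at negative offsets from it;
    // otherwise the offset is flipped so slots are addressed at positive
    // offsets from the stack-pointer end of the spill area.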
    if (UsesFramePointer)
      Var->setStackOffset(-NextStackOffset);
    else
      Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
  }
}

InstCall *TargetLowering::makeHelperCall(const IceString &Name, Variable *Dest,
                                         SizeT MaxSrcs) {
  const bool HasTailCall = false;
  Constant *CallTarget = Ctx->getConstantExternSym(Name);
  InstCall *Call =
      InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
  return Call;
}

bool TargetLowering::shouldOptimizeMemIntrins() {
  return Ctx->getFlags().getOptLevel() >= Opt_1 ||
         Ctx->getFlags().getForceMemIntrinOpt();
}

void TargetLowering::emitWithoutPrefix(const ConstantRelocatable *C) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  if (C->getSuppressMangling())
    Str << C->getName();
  else
    Str << Ctx->mangleName(C->getName());
  RelocOffsetT Offset = C->getOffset();
  if (Offset) {
    if (Offset > 0)
      Str << "+";
    Str << Offset;
  }
}

void TargetLowering::emit(const ConstantRelocatable *C) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  Str << getConstantPrefix();
  emitWithoutPrefix(C);
}

std::unique_ptr<TargetDataLowering>
TargetDataLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = Ctx->getFlags().getTargetArch();
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return TargetData##X::create(Ctx);
#include "llvm/Config/SZTargets.def"

  llvm::report_fatal_error("Unsupported target data lowering");
}

TargetDataLowering::~TargetDataLowering() = default;

namespace {

// dataSectionSuffix decides whether to use SectionSuffix or MangledVarName as
// data section suffix. Essentially, when using separate data sections for
// globals, SectionSuffix is not necessary.
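// E.g., with data sections enabled, a global mangled as "foo" is placed in a
// section of its own such as ".data.foo" or ".rodata.foo"; otherwise a
// non-empty SectionSuffix "mysect" selects a shared section like
// ".data.mysect".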
IceString dataSectionSuffix(const IceString &SectionSuffix,
                            const IceString &MangledVarName,
                            const bool DataSections) {
  if (SectionSuffix.empty() && !DataSections) {
    return "";
  }

  if (DataSections) {
    // With data sections we don't need to use the SectionSuffix.
    return "." + MangledVarName;
  }

  assert(!SectionSuffix.empty());
  return "." + SectionSuffix;
}

} // end of anonymous namespace

void TargetDataLowering::emitGlobal(const VariableDeclaration &Var,
                                    const IceString &SectionSuffix) {
  if (!BuildDefs::dump())
    return;

  // If external and not initialized, this must be a cross test.
  // Don't generate a declaration for such cases.
  const bool IsExternal =
      Var.isExternal() || Ctx->getFlags().getDisableInternal();
  if (IsExternal && !Var.hasInitializer())
    return;

  Ostream &Str = Ctx->getStrEmit();
  const bool HasNonzeroInitializer = Var.hasNonzeroInitializer();
  const bool IsConstant = Var.getIsConstant();
  const SizeT Size = Var.getNumBytes();
  const IceString MangledName = Var.mangleName(Ctx);

  Str << "\t.type\t" << MangledName << ",%object\n";

  const bool UseDataSections = Ctx->getFlags().getDataSections();
  const IceString Suffix =
      dataSectionSuffix(SectionSuffix, MangledName, UseDataSections);
  if (IsConstant)
    Str << "\t.section\t.rodata" << Suffix << ",\"a\",%progbits\n";
  else if (HasNonzeroInitializer)
    Str << "\t.section\t.data" << Suffix << ",\"aw\",%progbits\n";
  else
    Str << "\t.section\t.bss" << Suffix << ",\"aw\",%nobits\n";

  if (IsExternal)
    Str << "\t.globl\t" << MangledName << "\n";

  const uint32_t Align = Var.getAlignment();
  if (Align > 1) {
    assert(llvm::isPowerOf2_32(Align));
    // Use the .p2align directive, since the .align N directive can either
    // interpret N as bytes, or power of 2 bytes, depending on the target.
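    // E.g., Align == 8 emits ".p2align 3".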
546 Str << "\t.p2align\t" << llvm::Log2_32(Align) << "\n";
547 }
548
549 Str << MangledName << ":\n";
550
551 if (HasNonzeroInitializer) {
John Porto1bec8bc2015-06-22 10:51:13 -0700552 for (const std::unique_ptr<VariableDeclaration::Initializer> &Init :
553 Var.getInitializers()) {
Jan Voung58eea4d2015-06-15 15:11:56 -0700554 switch (Init->getKind()) {
555 case VariableDeclaration::Initializer::DataInitializerKind: {
Jan Vounge0df91f2015-06-30 08:47:06 -0700556 const auto &Data =
557 llvm::cast<VariableDeclaration::DataInitializer>(Init.get())
558 ->getContents();
Jan Voung58eea4d2015-06-15 15:11:56 -0700559 for (SizeT i = 0; i < Init->getNumBytes(); ++i) {
560 Str << "\t.byte\t" << (((unsigned)Data[i]) & 0xff) << "\n";
561 }
562 break;
563 }
564 case VariableDeclaration::Initializer::ZeroInitializerKind:
565 Str << "\t.zero\t" << Init->getNumBytes() << "\n";
566 break;
567 case VariableDeclaration::Initializer::RelocInitializerKind: {
John Porto8b1a7052015-06-17 13:20:08 -0700568 const auto *Reloc =
John Porto1bec8bc2015-06-22 10:51:13 -0700569 llvm::cast<VariableDeclaration::RelocInitializer>(Init.get());
Jan Voung58eea4d2015-06-15 15:11:56 -0700570 Str << "\t" << getEmit32Directive() << "\t";
571 Str << Reloc->getDeclaration()->mangleName(Ctx);
572 if (RelocOffsetT Offset = Reloc->getOffset()) {
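          // INT32_MIN cannot be negated without overflow, so emit it with
          // "+" and the (negative) value itself.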
          if (Offset >= 0 || (Offset == INT32_MIN))
            Str << " + " << Offset;
          else
            Str << " - " << -Offset;
        }
        Str << "\n";
        break;
      }
      }
    }
  } else {
    // NOTE: for non-constant zero initializers, this is BSS (no bits),
    // so an ELF writer would not write to the file, and only track
    // virtual offsets, but the .s writer still needs this .zero and
    // cannot simply use the .size to advance offsets.
    Str << "\t.zero\t" << Size << "\n";
  }

  Str << "\t.size\t" << MangledName << ", " << Size << "\n";
}

std::unique_ptr<TargetHeaderLowering>
TargetHeaderLowering::createLowering(GlobalContext *Ctx) {
  TargetArch Target = Ctx->getFlags().getTargetArch();
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return TargetHeader##X::create(Ctx);
#include "llvm/Config/SZTargets.def"

  llvm::report_fatal_error("Unsupported target header lowering");
}

TargetHeaderLowering::~TargetHeaderLowering() = default;

} // end of namespace Ice