//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
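
  // Note on the table above: "Offset (bits)" is the bit position of the
  // fixed-up field inside the 32-bit instruction word; applyFixup() later
  // shifts the adjusted value left by this TargetOffset before OR'ing it into
  // the encoded bytes.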

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case AArch64::fixup_aarch64_movw:
  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}
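
// Note (informal): the byte counts above cover every byte the fixup's bit
// field can touch, not the full instruction width. For example, branch19
// occupies bits 5-23 of the 32-bit instruction word, so only the low three
// bytes need patching, while branch26 (bits 0-25) and the ADR/ADRP immediates
// (bits 5-23 plus 29-30) require all four bytes.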

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
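
// Illustration: AdrImmBits splits the 21-bit ADR/ADRP immediate into the A64
// immlo (bits 30:29) and immhi (bits 23:5) fields. For instance, Value = 0x5
// yields lo2 = 0x1 and hi19 = 0x1, so the returned mask is
// (0x1 << 5) | (0x1 << 29) = 0x20000020, ready to be OR'd into the opcode.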

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx, const Triple &TheTriple,
                                 bool IsResolved) {
  unsigned Kind = Fixup.getKind();
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw:
    Ctx.reportError(Fixup.getLoc(),
                    "no resolvable MOVZ/MOVK fixups supported yet");
    return Value;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}
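
// Worked example (illustrative): for an 8-byte load such as
// "ldr x0, [x1, :lo12:sym]" the relevant kind is ldst_imm12_scale8. If the
// low 12 bits of sym's address are 0x138, the checks above pass (the value is
// 8-byte aligned and below 0x8000) and the encoded immediate becomes
// 0x138 >> 3 == 0x27.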

/// getFixupKindContainereSizeInBytes - The number of bytes of the big-endian
/// container a fixup of the given kind lives in, or 0 if the item is little
/// endian.
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_tlsdesc_call:
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}
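
// Example: on a big-endian target an FK_Data_4 fixup reports a 4-byte
// container, so applyFixup() below patches those bytes in reversed order,
// whereas the instruction fixups report 0 because A64 instructions are stored
// little-endian regardless of data endianness.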

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
}
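
// Illustration of the masking loop above: an add_imm12 fixup with an adjusted
// value of 0x123 is shifted by its TargetOffset of 10 to 0x48C00, and on a
// little-endian target the three bytes it covers are OR'd with 0x00, 0x8C and
// 0x04 respectively.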

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                          const MCSubtargetInfo &STI) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         const MCSubtargetInfo &STI,
                                         MCInst &Res) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
  return true;
}
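
// Note: 0xd503201f is the A64 NOP encoding (HINT #0). For a request of, say,
// Count == 10, the code above emits two zero padding bytes followed by two
// 4-byte NOPs.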

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
  // GOT-relative LDR literal fixups always need a relocation.
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
      SymLoc == AArch64MCExpr::VK_GOT)
    return true;
  return false;
}
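
// In the GOT case above, the pc-relative literal load targets a GOT slot whose
// address is only known once the linker has laid out the GOT, so the fixup is
// likewise turned into a relocation rather than resolved by the assembler.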

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
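
  // Example (assuming the usual compact-unwind layout in which the frameless
  // stack-size field occupies bits 12-23): a 64-byte frame encodes as
  // (64 / 16) << 12 == 0x4000.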

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        assert(XReg == AArch64::FP && "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

}

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};
}

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}