//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
11#define LLVM_RUNTIMEDYLDMACHOAARCH64_H
12
13#include "../RuntimeDyldMachO.h"
14
15#define DEBUG_TYPE "dyld"
16
17namespace llvm {
18
19class RuntimeDyldMachOAArch64
20 : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
21public:
22 RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
23 : RuntimeDyldMachOCRTPBase(MM) {}
24
25 unsigned getMaxStubSize() override { return 8; }
26
27 unsigned getStubAlignment() override { return 1; }
28
29 relocation_iterator
30 processRelocationRef(unsigned SectionID, relocation_iterator RelI,
31 ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
32 const SymbolTableMap &Symbols, StubMap &Stubs) override {
33 const MachOObjectFile &Obj =
34 static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
35 MachO::any_relocation_info RelInfo =
36 Obj.getRelocation(RelI->getRawDataRefImpl());
37
38 assert(!Obj.isRelocationScattered(RelInfo) && "");
39
40 // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
41 // addend for the following relocation. If found: (1) store the associated
42 // addend, (2) consume the next relocation, and (3) use the stored addend to
43 // override the addend.
44 bool HasExplicitAddend = false;
45 int64_t ExplicitAddend = 0;
46 if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
47 assert(!Obj.getPlainRelocationExternal(RelInfo));
48 assert(!Obj.getAnyRelocationPCRel(RelInfo));
49 assert(Obj.getAnyRelocationLength(RelInfo) == 2);
50 HasExplicitAddend = true;
51 int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
52 // Sign-extend the 24-bit to 64-bit.
53 ExplicitAddend = (RawAddend << 40) >> 40;
54 ++RelI;
55 RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
56 }
57
58 RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
59 RelocationValueRef Value(
60 getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
61
62 if (HasExplicitAddend)
63 Value.Addend = ExplicitAddend;
64
65 bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
66 if (!IsExtern && RE.IsPCRel)
67 makeValueAddendPCRel(Value, ObjImg, RelI);
68
69 RE.Addend = Value.Addend;
70
71 if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
72 RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
73 processGOTRelocation(RE, Value, Stubs);
74 else {
75 if (Value.SymbolName)
76 addRelocationForSymbol(RE, Value.SymbolName);
77 else
78 addRelocationForSection(RE, Value.SectionID);
79 }
80
81 return ++RelI;
82 }
83
84 void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
85 DEBUG(dumpRelocationToResolve(RE, Value));
86
87 const SectionEntry &Section = Sections[RE.SectionID];
88 uint8_t *LocalAddress = Section.Address + RE.Offset;
89
90 switch (RE.RelType) {
91 default:
92 llvm_unreachable("Invalid relocation type!");
93 case MachO::ARM64_RELOC_UNSIGNED: {
94 assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
95 // Mask in the target value a byte at a time (we don't have an alignment
96 // guarantee for the target address, so this is safest).
97 if (RE.Size < 2)
98 llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
99
100 writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
101 break;
102 }
103 case MachO::ARM64_RELOC_BRANCH26: {
104 assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
105 // Mask the value into the target address. We know instructions are
106 // 32-bit aligned, so we can do it all at once.
107 uint32_t *p = (uint32_t *)LocalAddress;
108 // Check if the addend is encoded in the instruction.
109 uint32_t EncodedAddend = *p & 0x03FFFFFF;
110 if (EncodedAddend != 0) {
111 if (RE.Addend == 0)
112 llvm_unreachable("branch26 instruction has embedded addend.");
113 else
114 llvm_unreachable("branch26 instruction has embedded addend and"
115 "ARM64_RELOC_ADDEND.");
116 }
117 // Check if branch is in range.
118 uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
119 uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
120 assert(isInt<26>(PCRelVal) && "Branch target out of range!");
121 // Insert the value into the instruction.
122 *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
123 break;
124 }
125 case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
126 case MachO::ARM64_RELOC_PAGE21: {
127 assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
128 // Mask the value into the target address. We know instructions are
129 // 32-bit aligned, so we can do it all at once.
130 uint32_t *p = (uint32_t *)LocalAddress;
131 // Check if the addend is encoded in the instruction.
132 uint32_t EncodedAddend =
133 ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
134 if (EncodedAddend != 0) {
135 if (RE.Addend == 0)
136 llvm_unreachable("adrp instruction has embedded addend.");
137 else
138 llvm_unreachable("adrp instruction has embedded addend and"
139 "ARM64_RELOC_ADDEND.");
140 }
141 // Adjust for PC-relative relocation and offset.
142 uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
143 uint64_t PCRelVal =
144 ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
145 // Check that the value fits into 21 bits (+ 12 lower bits).
146 assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
147 // Insert the value into the instruction.
148 uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
149 uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
150 *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
151 break;
152 }
153 case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
154 case MachO::ARM64_RELOC_PAGEOFF12: {
155 assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
156 // Mask the value into the target address. We know instructions are
157 // 32-bit aligned, so we can do it all at once.
158 uint32_t *p = (uint32_t *)LocalAddress;
159 // Check if the addend is encoded in the instruction.
160 uint32_t EncodedAddend = *p & 0x003FFC00;
161 if (EncodedAddend != 0) {
162 if (RE.Addend == 0)
163 llvm_unreachable("adrp instruction has embedded addend.");
164 else
165 llvm_unreachable("adrp instruction has embedded addend and"
166 "ARM64_RELOC_ADDEND.");
167 }
168 // Add the offset from the symbol.
169 Value += RE.Addend;
170 // Mask out the page address and only use the lower 12 bits.
171 Value &= 0xFFF;
172 // Check which instruction we are updating to obtain the implicit shift
173 // factor from LDR/STR instructions.
174 if (*p & 0x08000000) {
175 uint32_t ImplicitShift = ((*p >> 30) & 0x3);
176 switch (ImplicitShift) {
177 case 0:
178 // Check if this a vector op.
179 if ((*p & 0x04800000) == 0x04800000) {
180 ImplicitShift = 4;
181 assert(((Value & 0xF) == 0) &&
182 "128-bit LDR/STR not 16-byte aligned.");
183 }
184 break;
185 case 1:
186 assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
187 case 2:
188 assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
189 case 3:
190 assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
191 }
192 // Compensate for implicit shift.
193 Value >>= ImplicitShift;
194 }
195 // Insert the value into the instruction.
196 *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
197 break;
198 }
199 case MachO::ARM64_RELOC_SUBTRACTOR:
200 case MachO::ARM64_RELOC_POINTER_TO_GOT:
201 case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
202 case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
203 llvm_unreachable("Relocation type not implemented yet!");
204 case MachO::ARM64_RELOC_ADDEND:
205 llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
206 "processRelocationRef!");
207 }
208 }
209
210 void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
211 const SectionRef &Section) {}
212
213private:
214 void processGOTRelocation(const RelocationEntry &RE,
215 RelocationValueRef &Value, StubMap &Stubs) {
216 assert(RE.Size == 2);
217 SectionEntry &Section = Sections[RE.SectionID];
218 StubMap::const_iterator i = Stubs.find(Value);
219 uint8_t *Addr;
220 if (i != Stubs.end())
221 Addr = Section.Address + i->second;
222 else {
223 // FIXME: There must be a better way to do this then to check and fix the
224 // alignment every time!!!
225 uintptr_t BaseAddress = uintptr_t(Section.Address);
226 uintptr_t StubAlignment = getStubAlignment();
227 uintptr_t StubAddress =
228 (BaseAddress + Section.StubOffset + StubAlignment - 1) &
229 -StubAlignment;
230 unsigned StubOffset = StubAddress - BaseAddress;
231 Stubs[Value] = StubOffset;
232 assert(((StubAddress % getStubAlignment()) == 0) &&
233 "GOT entry not aligned");
234 RelocationEntry GOTRE(RE.SectionID, StubOffset,
235 MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
236 /*IsPCRel=*/false, /*Size=*/3);
237 if (Value.SymbolName)
238 addRelocationForSymbol(GOTRE, Value.SymbolName);
239 else
240 addRelocationForSection(GOTRE, Value.SectionID);
241 Section.StubOffset = StubOffset + getMaxStubSize();
242 Addr = (uint8_t *)StubAddress;
243 }
244 RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
245 RE.IsPCRel, RE.Size);
246 resolveRelocation(TargetRE, (uint64_t)Addr);
247 }
248};
249}
250
251#undef DEBUG_TYPE
252
253#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H