//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:
  RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
      : RuntimeDyldMachOCRTPBase(MM) {}

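  // Stubs on AArch64 are single 64-bit GOT entries (see processGOTRelocation
  // below), so the maximum stub size and the stub alignment are both 8 bytes.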
  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }

  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
                       const SymbolTableMap &Symbols, StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported on AArch64!");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If we see one: (1) store the
    // associated addend, (2) consume the next relocation, and (3) use the
    // stored addend to override that relocation's own addend.
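    // E.g. a reference to "_foo + 8" is emitted as an ARM64_RELOC_ADDEND whose
    // symbolnum field carries the value 8, immediately followed by the
    // ARM64_RELOC_BRANCH26 (or PAGE21/PAGEOFF12) that actually targets _foo.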
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
    RelocationValueRef Value(
        getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Addend = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, ObjImg, RelI);

    RE.Addend = Value.Addend;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;

    switch (RE.RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // Mask in the target value a byte at a time (we don't have an alignment
      // guarantee for the target address, so this is safest).
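      // RE.Size is the log2 of the relocation width, so only 4-byte
      // (Size == 2) and 8-byte (Size == 3) relocations are accepted here.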
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "non-PCRel ARM64_RELOC_BRANCH26 not supported");
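      // A B/BL instruction encodes its target as a signed 26-bit word offset
      // in bits 25:0; the destination is PC + (imm26 << 2).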
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction.
      uint32_t EncodedAddend = *p & 0x03FFFFFF;
      if (EncodedAddend != 0) {
        if (RE.Addend == 0)
          llvm_unreachable("branch26 instruction has embedded addend.");
        else
          llvm_unreachable("branch26 instruction has embedded addend and "
                           "ARM64_RELOC_ADDEND.");
      }
      // Check if branch is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
      assert(isInt<28>(PCRelVal) && "Branch target out of range!");
      // Insert the value into the instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "non-PCRel ARM64_RELOC_PAGE21 not supported");
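      // ADRP computes a PC-relative page address: immlo (bits 30:29) and
      // immhi (bits 23:5) form a signed 21-bit page delta that is shifted
      // left by 12 and added to the page of the PC.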
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction.
      uint32_t EncodedAddend =
          ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      if (EncodedAddend != 0) {
        if (RE.Addend == 0)
          llvm_unreachable("adrp instruction has embedded addend.");
        else
          llvm_unreachable("adrp instruction has embedded addend and "
                           "ARM64_RELOC_ADDEND.");
      }
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      uint64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      // Check that the value fits into 21 bits (+ 12 lower bits).
      assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
      // Insert the value into the instruction.
      uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
      uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
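      // The page-offset relocation fills the 12-bit immediate in bits 21:10
      // of an ADD or LDR/STR instruction; loads and stores implicitly scale
      // that immediate by the access size, which is compensated for below.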
      // Mask the value into the target address. We know instructions are
      // 32-bit aligned, so we can do it all at once.
      uint32_t *p = (uint32_t *)LocalAddress;
      // Check if the addend is encoded in the instruction.
      uint32_t EncodedAddend = *p & 0x003FFC00;
      if (EncodedAddend != 0) {
        if (RE.Addend == 0)
          llvm_unreachable("ldr/str instruction has embedded addend.");
        else
          llvm_unreachable("ldr/str instruction has embedded addend and "
                           "ARM64_RELOC_ADDEND.");
      }
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      // Check which instruction we are updating to obtain the implicit shift
      // factor from LDR/STR instructions.
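      // Bit 27 is set for load/store instructions but clear for ADD
      // immediate, which takes an unscaled imm12. For loads and stores,
      // bits 31:30 hold log2 of the access size; the 128-bit vector case is
      // recognized by the vector bit (26) plus opc<1> (bit 23), which
      // together form the 0x04800000 mask tested below.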
      if (*p & 0x08000000) {
        uint32_t ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Value & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
        // Compensate for implicit shift.
        Value >>= ImplicitShift;
      }
      // Insert the value into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not implemented yet!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
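    // A GOT "stub" here is just an 8-byte pointer slot in the stub area. We
    // reuse an existing slot for this value if there is one; otherwise we
    // allocate a new slot and emit an absolute ARM64_RELOC_UNSIGNED
    // (Size = 3, i.e. 8 bytes) to fill it in. The original GOT_LOAD
    // relocation is then resolved against the slot's address rather than
    // against the target itself.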
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    uint8_t *Addr;
    if (i != Stubs.end())
      Addr = Section.Address + i->second;
    else {
      // FIXME: There must be a better way to do this than to check and fix
      // the alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Addr = (uint8_t *)StubAddress;
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
                             RE.IsPCRel, RE.Size);
    resolveRelocation(TargetRE, (uint64_t)Addr);
  }
};
} // end llvm namespace

#undef DEBUG_TYPE

#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H