//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
| 9 | |
| 10 | #ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H |
| 11 | #define LLVM_RUNTIMEDYLDMACHOAARCH64_H |
| 12 | |
| 13 | #include "../RuntimeDyldMachO.h" |
| 14 | |
| 15 | #define DEBUG_TYPE "dyld" |
| 16 | |
| 17 | namespace llvm { |
| 18 | |
| 19 | class RuntimeDyldMachOAArch64 |
| 20 | : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> { |
| 21 | public: |
| 22 | RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM) |
| 23 | : RuntimeDyldMachOCRTPBase(MM) {} |
| 24 | |
| 25 | unsigned getMaxStubSize() override { return 8; } |
| 26 | |
Lang Hames | e5fc826 | 2014-07-17 23:11:30 +0000 | [diff] [blame] | 27 | unsigned getStubAlignment() override { return 8; } |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 28 | |
Juergen Ributzka | b13b52e | 2014-07-22 21:42:51 +0000 | [diff] [blame] | 29 | /// Extract the addend encoded in the instruction / memory location. |
| 30 | int64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes, |
| 31 | uint32_t RelType) const { |
| 32 | int64_t Addend = 0; |
| 33 | // Verify that the relocation has the correct size and alignment. |
| 34 | switch (RelType) { |
| 35 | default: |
| 36 | llvm_unreachable("Unsupported relocation type!"); |
| 37 | case MachO::ARM64_RELOC_UNSIGNED: |
| 38 | assert((NumBytes >= 4 && NumBytes <= 8) && "Invalid relocation size."); |
| 39 | break; |
| 40 | case MachO::ARM64_RELOC_BRANCH26: |
| 41 | case MachO::ARM64_RELOC_PAGE21: |
| 42 | case MachO::ARM64_RELOC_PAGEOFF12: |
| 43 | case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: |
| 44 | case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: |
| 45 | assert(NumBytes == 4 && "Invalid relocation size."); |
| 46 | assert((((uintptr_t)LocalAddress & 0x3) == 0) && |
| 47 | "Instruction address is not aligned to 4 bytes."); |
| 48 | break; |
| 49 | } |
| 50 | |
| 51 | switch (RelType) { |
| 52 | default: |
| 53 | llvm_unreachable("Unsupported relocation type!"); |
| 54 | case MachO::ARM64_RELOC_UNSIGNED: |
| 55 | // This could be an unaligned memory location - use memcpy. |
| 56 | memcpy(&Addend, LocalAddress, NumBytes); |
| 57 | break; |
| 58 | case MachO::ARM64_RELOC_BRANCH26: { |
| 59 | // Verify that the relocation points to the expected branch instruction. |
| 60 | uint32_t *p = (uint32_t *)LocalAddress; |
| 61 | assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction."); |
| 62 | |
| 63 | // Get the 26 bit addend encoded in the branch instruction and sign-extend |
| 64 | // to 64 bit. The lower 2 bits are always zeros and are therefore implicit |
| 65 | // (<< 2). |
| 66 | Addend = (*p & 0x03FFFFFF) << 2; |
| 67 | Addend = SignExtend64(Addend, 28); |
| 68 | break; |
| 69 | } |
| 70 | case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: |
| 71 | case MachO::ARM64_RELOC_PAGE21: { |
| 72 | // Verify that the relocation points to the expected adrp instruction. |
| 73 | uint32_t *p = (uint32_t *)LocalAddress; |
| 74 | assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction."); |
| 75 | |
| 76 | // Get the 21 bit addend encoded in the adrp instruction and sign-extend |
| 77 | // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are |
| 78 | // therefore implicit (<< 12). |
| 79 | Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12; |
| 80 | Addend = SignExtend64(Addend, 33); |
| 81 | break; |
| 82 | } |
| 83 | case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: { |
| 84 | // Verify that the relocation points to one of the expected load / store |
| 85 | // instructions. |
| 86 | uint32_t *p = (uint32_t *)LocalAddress; |
| 87 | assert((*p & 0x3B000000) == 0x39000000 && |
| 88 | "Only expected load / store instructions."); |
| 89 | } // fall-through |
| 90 | case MachO::ARM64_RELOC_PAGEOFF12: { |
| 91 | // Verify that the relocation points to one of the expected load / store |
| 92 | // or add / sub instructions. |
| 93 | uint32_t *p = (uint32_t *)LocalAddress; |
| 94 | assert((((*p & 0x3B000000) == 0x39000000) || |
| 95 | ((*p & 0x11C00000) == 0x11000000) ) && |
| 96 | "Expected load / store or add/sub instruction."); |
| 97 | |
| 98 | // Get the 12 bit addend encoded in the instruction. |
| 99 | Addend = (*p & 0x003FFC00) >> 10; |
| 100 | |
| 101 | // Check which instruction we are decoding to obtain the implicit shift |
| 102 | // factor of the instruction. |
| 103 | int ImplicitShift = 0; |
| 104 | if ((*p & 0x3B000000) == 0x39000000) { // << load / store |
| 105 | // For load / store instructions the size is encoded in bits 31:30. |
| 106 | ImplicitShift = ((*p >> 30) & 0x3); |
| 107 | if (ImplicitShift == 0) { |
| 108 | // Check if this a vector op to get the correct shift value. |
| 109 | if ((*p & 0x04800000) == 0x04800000) |
| 110 | ImplicitShift = 4; |
| 111 | } |
| 112 | } |
| 113 | // Compensate for implicit shift. |
| 114 | Addend <<= ImplicitShift; |
| 115 | break; |
| 116 | } |
| 117 | } |
| 118 | return Addend; |
| 119 | } |
| 120 | |
Juergen Ributzka | f560928 | 2014-07-22 21:42:55 +0000 | [diff] [blame] | 121 | /// Extract the addend encoded in the instruction. |
| 122 | void encodeAddend(uint8_t *LocalAddress, uint32_t RelType, |
| 123 | int64_t Addend) const { |
| 124 | // Verify that the relocation has the correct alignment. |
| 125 | switch (RelType) { |
| 126 | default: |
| 127 | llvm_unreachable("Unsupported relocation type!"); |
| 128 | case MachO::ARM64_RELOC_UNSIGNED: |
| 129 | llvm_unreachable("Invalid relocation type for instruction."); |
| 130 | case MachO::ARM64_RELOC_BRANCH26: |
| 131 | case MachO::ARM64_RELOC_PAGE21: |
| 132 | case MachO::ARM64_RELOC_PAGEOFF12: |
| 133 | case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: |
| 134 | case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: |
| 135 | assert((((uintptr_t)LocalAddress & 0x3) == 0) && |
| 136 | "Instruction address is not aligned to 4 bytes."); |
| 137 | break; |
| 138 | } |
| 139 | |
| 140 | switch (RelType) { |
| 141 | default: |
| 142 | llvm_unreachable("Unsupported relocation type!"); |
| 143 | case MachO::ARM64_RELOC_BRANCH26: { |
| 144 | // Verify that the relocation points to the expected branch instruction. |
| 145 | uint32_t *p = (uint32_t *)LocalAddress; |
| 146 | assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction."); |
| 147 | |
| 148 | // Verify addend value. |
| 149 | assert((Addend & 0x3) == 0 && "Branch target is not aligned"); |
| 150 | assert(isInt<28>(Addend) && "Branch target is out of range."); |
| 151 | |
| 152 | // Encode the addend as 26 bit immediate in the branch instruction. |
| 153 | *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF); |
| 154 | break; |
| 155 | } |
| 156 | case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: |
| 157 | case MachO::ARM64_RELOC_PAGE21: { |
| 158 | // Verify that the relocation points to the expected adrp instruction. |
| 159 | uint32_t *p = (uint32_t *)LocalAddress; |
| 160 | assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction."); |
| 161 | |
| 162 | // Check that the addend fits into 21 bits (+ 12 lower bits). |
| 163 | assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned."); |
| 164 | assert(isInt<33>(Addend) && "Invalid page reloc value."); |
| 165 | |
| 166 | // Encode the addend into the instruction. |
| 167 | uint32_t ImmLoValue = (uint32_t)(Addend << 17) & 0x60000000; |
| 168 | uint32_t ImmHiValue = (uint32_t)(Addend >> 9) & 0x00FFFFE0; |
| 169 | *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue; |
| 170 | break; |
| 171 | } |
| 172 | case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: { |
| 173 | // Verify that the relocation points to one of the expected load / store |
| 174 | // instructions. |
| 175 | uint32_t *p = (uint32_t *)LocalAddress; |
| 176 | assert((*p & 0x3B000000) == 0x39000000 && |
| 177 | "Only expected load / store instructions."); |
| 178 | } // fall-through |
| 179 | case MachO::ARM64_RELOC_PAGEOFF12: { |
| 180 | // Verify that the relocation points to one of the expected load / store |
| 181 | // or add / sub instructions. |
| 182 | uint32_t *p = (uint32_t *)LocalAddress; |
| 183 | assert((((*p & 0x3B000000) == 0x39000000) || |
| 184 | ((*p & 0x11C00000) == 0x11000000) ) && |
| 185 | "Expected load / store or add/sub instruction."); |
| 186 | |
| 187 | // Check which instruction we are decoding to obtain the implicit shift |
| 188 | // factor of the instruction and verify alignment. |
| 189 | int ImplicitShift = 0; |
| 190 | if ((*p & 0x3B000000) == 0x39000000) { // << load / store |
| 191 | // For load / store instructions the size is encoded in bits 31:30. |
| 192 | ImplicitShift = ((*p >> 30) & 0x3); |
| 193 | switch (ImplicitShift) { |
| 194 | case 0: |
| 195 | // Check if this a vector op to get the correct shift value. |
| 196 | if ((*p & 0x04800000) == 0x04800000) { |
| 197 | ImplicitShift = 4; |
| 198 | assert(((Addend & 0xF) == 0) && |
| 199 | "128-bit LDR/STR not 16-byte aligned."); |
| 200 | } |
| 201 | break; |
| 202 | case 1: |
| 203 | assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned."); |
| 204 | break; |
| 205 | case 2: |
| 206 | assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned."); |
| 207 | break; |
| 208 | case 3: |
| 209 | assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned."); |
| 210 | break; |
| 211 | } |
| 212 | } |
| 213 | // Compensate for implicit shift. |
| 214 | Addend >>= ImplicitShift; |
| 215 | assert(isUInt<12>(Addend) && "Addend cannot be encoded."); |
| 216 | |
| 217 | // Encode the addend into the instruction. |
| 218 | *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00); |
| 219 | break; |
| 220 | } |
| 221 | } |
| 222 | } |
| 223 | |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 224 | relocation_iterator |
| 225 | processRelocationRef(unsigned SectionID, relocation_iterator RelI, |
| 226 | ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID, |
| 227 | const SymbolTableMap &Symbols, StubMap &Stubs) override { |
| 228 | const MachOObjectFile &Obj = |
| 229 | static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile()); |
| 230 | MachO::any_relocation_info RelInfo = |
| 231 | Obj.getRelocation(RelI->getRawDataRefImpl()); |
| 232 | |
| 233 | assert(!Obj.isRelocationScattered(RelInfo) && ""); |
| 234 | |
| 235 | // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit |
| 236 | // addend for the following relocation. If found: (1) store the associated |
| 237 | // addend, (2) consume the next relocation, and (3) use the stored addend to |
| 238 | // override the addend. |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 239 | int64_t ExplicitAddend = 0; |
| 240 | if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) { |
| 241 | assert(!Obj.getPlainRelocationExternal(RelInfo)); |
| 242 | assert(!Obj.getAnyRelocationPCRel(RelInfo)); |
| 243 | assert(Obj.getAnyRelocationLength(RelInfo) == 2); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 244 | int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo); |
| 245 | // Sign-extend the 24-bit to 64-bit. |
Juergen Ributzka | dd19d33 | 2014-07-22 21:42:49 +0000 | [diff] [blame] | 246 | ExplicitAddend = SignExtend64(RawAddend, 24); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 247 | ++RelI; |
| 248 | RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl()); |
| 249 | } |
| 250 | |
| 251 | RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI)); |
| 252 | RelocationValueRef Value( |
| 253 | getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols)); |
| 254 | |
Juergen Ributzka | dd19d33 | 2014-07-22 21:42:49 +0000 | [diff] [blame] | 255 | assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\ |
| 256 | "ARM64_RELOC_ADDEND and embedded addend in the instruction."); |
| 257 | if (ExplicitAddend) { |
Lang Hames | 76774a5 | 2014-07-18 20:29:36 +0000 | [diff] [blame] | 258 | RE.Addend = ExplicitAddend; |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 259 | Value.Addend = ExplicitAddend; |
Lang Hames | 76774a5 | 2014-07-18 20:29:36 +0000 | [diff] [blame] | 260 | } |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 261 | |
| 262 | bool IsExtern = Obj.getPlainRelocationExternal(RelInfo); |
| 263 | if (!IsExtern && RE.IsPCRel) |
| 264 | makeValueAddendPCRel(Value, ObjImg, RelI); |
| 265 | |
| 266 | RE.Addend = Value.Addend; |
| 267 | |
| 268 | if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 || |
| 269 | RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12) |
| 270 | processGOTRelocation(RE, Value, Stubs); |
| 271 | else { |
| 272 | if (Value.SymbolName) |
| 273 | addRelocationForSymbol(RE, Value.SymbolName); |
| 274 | else |
| 275 | addRelocationForSection(RE, Value.SectionID); |
| 276 | } |
| 277 | |
| 278 | return ++RelI; |
| 279 | } |
| 280 | |
| 281 | void resolveRelocation(const RelocationEntry &RE, uint64_t Value) { |
| 282 | DEBUG(dumpRelocationToResolve(RE, Value)); |
| 283 | |
| 284 | const SectionEntry &Section = Sections[RE.SectionID]; |
| 285 | uint8_t *LocalAddress = Section.Address + RE.Offset; |
| 286 | |
| 287 | switch (RE.RelType) { |
| 288 | default: |
| 289 | llvm_unreachable("Invalid relocation type!"); |
| 290 | case MachO::ARM64_RELOC_UNSIGNED: { |
| 291 | assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported"); |
| 292 | // Mask in the target value a byte at a time (we don't have an alignment |
| 293 | // guarantee for the target address, so this is safest). |
| 294 | if (RE.Size < 2) |
| 295 | llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED"); |
| 296 | |
| 297 | writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size); |
| 298 | break; |
| 299 | } |
| 300 | case MachO::ARM64_RELOC_BRANCH26: { |
| 301 | assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported"); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 302 | // Check if branch is in range. |
| 303 | uint64_t FinalAddress = Section.LoadAddress + RE.Offset; |
Juergen Ributzka | f560928 | 2014-07-22 21:42:55 +0000 | [diff] [blame] | 304 | int64_t PCRelVal = Value - FinalAddress + RE.Addend; |
| 305 | encodeAddend(LocalAddress, RE.RelType, PCRelVal); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 306 | break; |
| 307 | } |
| 308 | case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: |
| 309 | case MachO::ARM64_RELOC_PAGE21: { |
| 310 | assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported"); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 311 | // Adjust for PC-relative relocation and offset. |
| 312 | uint64_t FinalAddress = Section.LoadAddress + RE.Offset; |
Juergen Ributzka | f560928 | 2014-07-22 21:42:55 +0000 | [diff] [blame] | 313 | int64_t PCRelVal = |
| 314 | ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096)); |
| 315 | encodeAddend(LocalAddress, RE.RelType, PCRelVal); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 316 | break; |
| 317 | } |
| 318 | case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: |
| 319 | case MachO::ARM64_RELOC_PAGEOFF12: { |
| 320 | assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported"); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 321 | // Add the offset from the symbol. |
| 322 | Value += RE.Addend; |
| 323 | // Mask out the page address and only use the lower 12 bits. |
| 324 | Value &= 0xFFF; |
Juergen Ributzka | f560928 | 2014-07-22 21:42:55 +0000 | [diff] [blame] | 325 | encodeAddend(LocalAddress, RE.RelType, Value); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 326 | break; |
| 327 | } |
| 328 | case MachO::ARM64_RELOC_SUBTRACTOR: |
| 329 | case MachO::ARM64_RELOC_POINTER_TO_GOT: |
| 330 | case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21: |
| 331 | case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12: |
Juergen Ributzka | f560928 | 2014-07-22 21:42:55 +0000 | [diff] [blame] | 332 | llvm_unreachable("Relocation type not yet implemented!"); |
Lang Hames | a521688 | 2014-07-17 18:54:50 +0000 | [diff] [blame] | 333 | case MachO::ARM64_RELOC_ADDEND: |
| 334 | llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by " |
| 335 | "processRelocationRef!"); |
| 336 | } |
| 337 | } |
| 338 | |
| 339 | void finalizeSection(ObjectImage &ObjImg, unsigned SectionID, |
| 340 | const SectionRef &Section) {} |
| 341 | |
| 342 | private: |
| 343 | void processGOTRelocation(const RelocationEntry &RE, |
| 344 | RelocationValueRef &Value, StubMap &Stubs) { |
| 345 | assert(RE.Size == 2); |
| 346 | SectionEntry &Section = Sections[RE.SectionID]; |
| 347 | StubMap::const_iterator i = Stubs.find(Value); |
| 348 | uint8_t *Addr; |
| 349 | if (i != Stubs.end()) |
| 350 | Addr = Section.Address + i->second; |
| 351 | else { |
| 352 | // FIXME: There must be a better way to do this then to check and fix the |
| 353 | // alignment every time!!! |
| 354 | uintptr_t BaseAddress = uintptr_t(Section.Address); |
| 355 | uintptr_t StubAlignment = getStubAlignment(); |
| 356 | uintptr_t StubAddress = |
| 357 | (BaseAddress + Section.StubOffset + StubAlignment - 1) & |
| 358 | -StubAlignment; |
| 359 | unsigned StubOffset = StubAddress - BaseAddress; |
| 360 | Stubs[Value] = StubOffset; |
| 361 | assert(((StubAddress % getStubAlignment()) == 0) && |
| 362 | "GOT entry not aligned"); |
| 363 | RelocationEntry GOTRE(RE.SectionID, StubOffset, |
| 364 | MachO::ARM64_RELOC_UNSIGNED, Value.Addend, |
| 365 | /*IsPCRel=*/false, /*Size=*/3); |
| 366 | if (Value.SymbolName) |
| 367 | addRelocationForSymbol(GOTRE, Value.SymbolName); |
| 368 | else |
| 369 | addRelocationForSection(GOTRE, Value.SectionID); |
| 370 | Section.StubOffset = StubOffset + getMaxStubSize(); |
| 371 | Addr = (uint8_t *)StubAddress; |
| 372 | } |
| 373 | RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0, |
| 374 | RE.IsPCRel, RE.Size); |
| 375 | resolveRelocation(TargetRE, (uint64_t)Addr); |
| 376 | } |
| 377 | }; |
| 378 | } |
| 379 | |
| 380 | #undef DEBUG_TYPE |
| 381 | |
| 382 | #endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H |