//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic JITLinker utility class.
//
//===----------------------------------------------------------------------===//

#include "JITLinkGeneric.h"
#include "EHFrameSupportImpl.h"

#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/MemoryBuffer.h"

#define DEBUG_TYPE "jitlink"

namespace llvm {
namespace jitlink {

JITLinkerBase::~JITLinkerBase() {}

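// Phase 1 of the link: build the atom graph from the object buffer, run
// pre-prune passes, prune dead atoms, run post-prune passes, lay out the
// surviving atoms into segments, allocate working memory, and kick off the
// asynchronous external-symbol lookup that continues in linkPhase2.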
void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {

  // Build the atom graph.
  if (auto GraphOrErr = buildGraph(Ctx->getObjectBuffer()))
    G = std::move(*GraphOrErr);
  else
    return Ctx->notifyFailed(GraphOrErr.takeError());
  assert(G && "Graph should have been created by buildGraph above");

  // Prune and optimize the graph.
  if (auto Err = runPasses(Passes.PrePrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" pre-pruning:\n";
    dumpGraph(dbgs());
  });

  prune(*G);

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" post-pruning:\n";
    dumpGraph(dbgs());
  });

  // Run post-pruning passes.
  if (auto Err = runPasses(Passes.PostPrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));

  // Sort atoms into segments.
  layOutAtoms();

  // Allocate memory for segments.
  if (auto Err = allocateSegments(Layout))
    return Ctx->notifyFailed(std::move(Err));

  // Notify client that the defined atoms have been assigned addresses.
  Ctx->notifyResolved(*G);

  auto ExternalSymbols = getExternalSymbolNames();

  // We're about to hand off ownership of ourself to the continuation. Grab a
  // pointer to the context so that we can call it to initiate the lookup.
  //
  // FIXME: Once callee expressions are defined to be sequenced before argument
  // expressions (c++17) we can simplify all this to:
  //
  // Ctx->lookup(std::move(UnresolvedExternals),
  //             [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
  //               Self->linkPhase2(std::move(Self), std::move(Result));
  //             });
  //
  // FIXME: Use move capture once we have c++14.
  auto *TmpCtx = Ctx.get();
  auto *UnownedSelf = Self.release();
  auto Phase2Continuation =
      [UnownedSelf](Expected<AsyncLookupResult> LookupResult) {
        std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
        UnownedSelf->linkPhase2(std::move(Self), std::move(LookupResult));
      };
  TmpCtx->lookup(std::move(ExternalSymbols), std::move(Phase2Continuation));
}

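// Phase 2 of the link: apply the addresses from the external symbol lookup to
// the graph, copy atom content into working memory and apply fixups, run
// post-fixup passes, then asynchronously finalize the allocation, continuing
// in linkPhase3.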
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
                               Expected<AsyncLookupResult> LR) {
  // If the lookup failed, bail out.
  if (!LR)
    return Ctx->notifyFailed(LR.takeError());

  // Assign addresses to external atoms.
  applyLookupResult(*LR);

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" before copy-and-fixup:\n";
    dumpGraph(dbgs());
  });

  // Copy atom content to working memory and fix up.
  if (auto Err = copyAndFixUpAllAtoms(Layout, *Alloc))
    return Ctx->notifyFailed(std::move(Err));

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" after copy-and-fixup:\n";
    dumpGraph(dbgs());
  });

  if (auto Err = runPasses(Passes.PostFixupPasses, *G))
    return Ctx->notifyFailed(std::move(Err));

  // FIXME: Use move capture once we have c++14.
  auto *UnownedSelf = Self.release();
  auto Phase3Continuation = [UnownedSelf](Error Err) {
    std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
    UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
  };

  Alloc->finalizeAsync(std::move(Phase3Continuation));
}

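// Phase 3 of the link: report any finalization error to the client, otherwise
// hand the finalized allocation over via notifyFinalized.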
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
  if (Err)
    return Ctx->notifyFailed(std::move(Err));
  Ctx->notifyFinalized(std::move(Alloc));
}

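// Runs each pass in the given list over the graph, stopping at the first
// error.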
Error JITLinkerBase::runPasses(AtomGraphPassList &Passes, AtomGraph &G) {
  for (auto &P : Passes)
    if (auto Err = P(G))
      return Err;
  return Error::success();
}

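// Groups sections by protection flags and zero-fill status, orders sections
// within each segment by ordinal, and orders atoms within each section by
// following layout-next chains from address-sorted layout heads.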
void JITLinkerBase::layOutAtoms() {
  // Group sections by protections, and whether or not they're zero-fill.
  for (auto &S : G->sections()) {

    // Skip empty sections.
    if (S.atoms_empty())
      continue;

    auto &SL = Layout[S.getProtectionFlags()];
    if (S.isZeroFill())
      SL.ZeroFillSections.push_back(SegmentLayout::SectionLayout(S));
    else
      SL.ContentSections.push_back(SegmentLayout::SectionLayout(S));
  }

  // Sort sections within the layout by ordinal.
  {
    auto CompareByOrdinal = [](const SegmentLayout::SectionLayout &LHS,
                               const SegmentLayout::SectionLayout &RHS) {
      return LHS.S->getSectionOrdinal() < RHS.S->getSectionOrdinal();
    };
    for (auto &KV : Layout) {
      auto &SL = KV.second;
      std::sort(SL.ContentSections.begin(), SL.ContentSections.end(),
                CompareByOrdinal);
      std::sort(SL.ZeroFillSections.begin(), SL.ZeroFillSections.end(),
                CompareByOrdinal);
    }
  }

  // Add atoms to the sections.
  for (auto &KV : Layout) {
    auto &SL = KV.second;
    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections}) {
      for (auto &SI : *SIList) {
        std::vector<DefinedAtom *> LayoutHeads;
        LayoutHeads.reserve(SI.S->atoms_size());

        // First build the list of layout-heads (i.e. "heads" of layout-next
        // chains).
        DenseSet<DefinedAtom *> AlreadyLayedOut;
        for (auto *DA : SI.S->atoms()) {
          if (AlreadyLayedOut.count(DA))
            continue;
          LayoutHeads.push_back(DA);
          while (DA->hasLayoutNext()) {
            auto &Next = DA->getLayoutNext();
            AlreadyLayedOut.insert(&Next);
            DA = &Next;
          }
        }

        // Now sort the list of layout heads by address.
        std::sort(LayoutHeads.begin(), LayoutHeads.end(),
                  [](const DefinedAtom *LHS, const DefinedAtom *RHS) {
                    return LHS->getAddress() < RHS->getAddress();
                  });

        // Now populate the SI.Atoms field by appending each of the chains.
        for (auto *DA : LayoutHeads) {
          SI.Atoms.push_back(DA);
          while (DA->hasLayoutNext()) {
            auto &Next = DA->getLayoutNext();
            SI.Atoms.push_back(&Next);
            DA = &Next;
          }
        }
      }
    }
  }

  LLVM_DEBUG({
    dbgs() << "Segment ordering:\n";
    for (auto &KV : Layout) {
      dbgs() << "  Segment "
             << static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
      auto &SL = KV.second;
      for (auto &SIEntry :
           {std::make_pair(&SL.ContentSections, "content sections"),
            std::make_pair(&SL.ZeroFillSections, "zero-fill sections")}) {
        auto &SIList = *SIEntry.first;
        dbgs() << "    " << SIEntry.second << ":\n";
        for (auto &SI : SIList) {
          dbgs() << "      " << SI.S->getName() << ":\n";
          for (auto *DA : SI.Atoms)
            dbgs() << "        " << *DA << "\n";
        }
      }
    }
  });
}

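// Computes content and zero-fill sizes and alignments for each segment,
// requests memory from the memory manager, and assigns target addresses to
// every atom in the layout.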
Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {

  // Compute segment sizes and allocate memory.
  LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
  JITLinkMemoryManager::SegmentsRequestMap Segments;
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SegLayout = KV.second;

    // Calculate segment content size.
    size_t SegContentSize = 0;
    for (auto &SI : SegLayout.ContentSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");
      for (auto *DA : SI.Atoms) {
        SegContentSize = alignTo(SegContentSize, DA->getAlignment());
        SegContentSize += DA->getSize();
      }
    }

    // Get segment content alignment.
    unsigned SegContentAlign = 1;
    if (!SegLayout.ContentSections.empty())
      SegContentAlign =
          SegLayout.ContentSections.front().Atoms.front()->getAlignment();

    // Calculate segment zero-fill size.
    uint64_t SegZeroFillSize = 0;
    for (auto &SI : SegLayout.ZeroFillSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");
      for (auto *DA : SI.Atoms) {
        SegZeroFillSize = alignTo(SegZeroFillSize, DA->getAlignment());
        SegZeroFillSize += DA->getSize();
      }
    }

    // Calculate segment zero-fill alignment.
    uint32_t SegZeroFillAlign = 1;
    if (!SegLayout.ZeroFillSections.empty())
      SegZeroFillAlign =
          SegLayout.ZeroFillSections.front().Atoms.front()->getAlignment();

    if (SegContentSize == 0)
      SegContentAlign = SegZeroFillAlign;

    if (SegContentAlign % SegZeroFillAlign != 0)
      return make_error<JITLinkError>("First content atom alignment does not "
                                      "accommodate first zero-fill atom "
                                      "alignment");

    Segments[Prot] = {SegContentSize, SegContentAlign, SegZeroFillSize,
                      SegZeroFillAlign};

    LLVM_DEBUG({
      dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
             << static_cast<sys::Memory::ProtectionFlags>(Prot) << ": "
             << SegContentSize << " content bytes (alignment "
             << SegContentAlign << ") + " << SegZeroFillSize
             << " zero-fill bytes (alignment " << SegZeroFillAlign << ")";
    });
  }
  LLVM_DEBUG(dbgs() << " }\n");

  if (auto AllocOrErr = Ctx->getMemoryManager().allocate(Segments))
    Alloc = std::move(*AllocOrErr);
  else
    return AllocOrErr.takeError();

  LLVM_DEBUG({
    dbgs() << "JIT linker got working memory:\n";
    for (auto &KV : Layout) {
      auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
      dbgs() << "  " << Prot << ": "
             << (const void *)Alloc->getWorkingMemory(Prot).data() << "\n";
    }
  });

  // Update atom target addresses.
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SL = KV.second;

    JITTargetAddress AtomTargetAddr =
        Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));

    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections})
      for (auto &SI : *SIList)
        for (auto *DA : SI.Atoms) {
          AtomTargetAddr = alignTo(AtomTargetAddr, DA->getAlignment());
          DA->setAddress(AtomTargetAddr);
          AtomTargetAddr += DA->getSize();
        }
  }

  return Error::success();
}

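// Collects the names of all external atoms in the graph so that the context
// can look up their addresses.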
DenseSet<StringRef> JITLinkerBase::getExternalSymbolNames() const {
  // Identify unresolved external atoms.
  DenseSet<StringRef> UnresolvedExternals;
  for (auto *DA : G->external_atoms()) {
    assert(DA->getAddress() == 0 &&
           "External has already been assigned an address");
    assert(DA->getName() != StringRef() && DA->getName() != "" &&
           "Externals must be named");
    UnresolvedExternals.insert(DA->getName());
  }
  return UnresolvedExternals;
}

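// Copies the resolved addresses from the lookup result onto the corresponding
// external atoms in the graph.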
void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
  for (auto &KV : Result) {
    Atom &A = G->getAtomByName(KV.first);
    assert(A.getAddress() == 0 && "Atom already resolved");
    A.setAddress(KV.second.getAddress());
  }

  LLVM_DEBUG({
    dbgs() << "Externals after applying lookup result:\n";
    for (auto *A : G->external_atoms())
      dbgs() << "  " << A->getName() << ": "
             << formatv("{0:x16}", A->getAddress()) << "\n";
  });
  assert(llvm::all_of(G->external_atoms(),
                      [](Atom *A) { return A->getAddress() != 0; }) &&
         "All atoms should have been resolved by this point");
}

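// Dumps the atom graph to the given stream, using the concrete linker's
// edge-kind names.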
void JITLinkerBase::dumpGraph(raw_ostream &OS) {
  assert(G && "Graph is not set yet");
  G->dump(OS, [this](Edge::Kind K) { return getEdgeKindName(K); });
}

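// Dead-strips the graph: atoms reachable from live roots via keep-alive edges
// are marked live, unreachable or should-discard atoms are removed, and
// should-discard atoms that are still referenced from live atoms are replaced
// with external atoms.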
void prune(AtomGraph &G) {
  std::vector<DefinedAtom *> Worklist;
  DenseMap<DefinedAtom *, std::vector<Edge *>> EdgesToUpdate;

  // Build the initial worklist from all atoms initially live.
  for (auto *DA : G.defined_atoms()) {
    if (!DA->isLive() || DA->shouldDiscard())
      continue;

    for (auto &E : DA->edges()) {
      if (!E.getTarget().isDefined())
        continue;

      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());

      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }

  // Propagate live flags to all atoms reachable from the initial live set.
  while (!Worklist.empty()) {
    DefinedAtom &NextLive = *Worklist.back();
    Worklist.pop_back();

    assert(!NextLive.shouldDiscard() &&
           "should-discard nodes should never make it into the worklist");

    // If this atom has already been marked as live, or is marked to be
    // discarded, then skip it.
    if (NextLive.isLive())
      continue;

    // Otherwise set it as live and add any non-live atoms that it points to
    // to the worklist.
    NextLive.setLive(true);

    for (auto &E : NextLive.edges()) {
      if (!E.getTarget().isDefined())
        continue;

      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());

      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }

  // Collect atoms to remove, then remove them from the graph.
  std::vector<DefinedAtom *> AtomsToRemove;
  for (auto *DA : G.defined_atoms())
    if (DA->shouldDiscard() || !DA->isLive())
      AtomsToRemove.push_back(DA);

  LLVM_DEBUG(dbgs() << "Pruning atoms:\n");
  for (auto *DA : AtomsToRemove) {
    LLVM_DEBUG(dbgs() << "  " << *DA << "... ");

    // Check whether we need to replace this atom with an external atom.
    //
    // We replace if all of the following hold:
    //   (1) The atom is marked should-discard,
    //   (2) it is live, and
    //   (3) it has edges pointing to it.
    //
    // Otherwise we simply delete the atom.
    bool ReplaceWithExternal = DA->isLive() && DA->shouldDiscard();
    std::vector<Edge *> *EdgesToUpdateForDA = nullptr;
    if (ReplaceWithExternal) {
      auto ETUItr = EdgesToUpdate.find(DA);
      if (ETUItr == EdgesToUpdate.end())
        ReplaceWithExternal = false;
      else
        EdgesToUpdateForDA = &ETUItr->second;
    }

    G.removeDefinedAtom(*DA);

    if (ReplaceWithExternal) {
      assert(EdgesToUpdateForDA &&
             "Replacing atom: There should be edges to update");

      auto &ExternalReplacement = G.addExternalAtom(DA->getName());
      for (auto *EdgeToUpdate : *EdgesToUpdateForDA)
        EdgeToUpdate->setTarget(ExternalReplacement);
      LLVM_DEBUG(dbgs() << "replaced with " << ExternalReplacement << "\n");
    } else
      LLVM_DEBUG(dbgs() << "deleted\n");
  }

  // Finally, discard any absolute symbols that were marked should-discard.
  {
    std::vector<Atom *> AbsoluteAtomsToRemove;
    for (auto *A : G.absolute_atoms())
      if (A->shouldDiscard())
        AbsoluteAtomsToRemove.push_back(A);
    for (auto *A : AbsoluteAtomsToRemove)
      G.removeAbsoluteAtom(*A);
  }
}

} // end namespace jitlink
} // end namespace llvm