//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a MachineCodeEmitter object that is used by the JIT to
// write machine code to memory and remember where relocatable values are.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "llvm/Constant.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/System/Memory.h"
#include <algorithm>
#include <iostream>
using namespace llvm;

namespace {
  Statistic<> NumBytes("jit", "Number of bytes of machine code compiled");
  Statistic<> NumRelos("jit", "Number of relocations applied");
  JIT *TheJIT = 0;
}


//===----------------------------------------------------------------------===//
// JITMemoryManager code.
//
namespace {
  /// MemoryRangeHeader - For a range of memory, this is the header that we put
  /// on the block of memory. It is carefully crafted to be one word of memory.
  /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
  /// which starts with this.
  struct FreeRangeHeader;
  struct MemoryRangeHeader {
    /// ThisAllocated - This is true if this block is currently allocated. If
    /// not, this can be converted to a FreeRangeHeader.
    intptr_t ThisAllocated : 1;

    /// PrevAllocated - Keep track of whether the block immediately before us is
    /// allocated. If not, the word immediately before this header is the size
    /// of the previous block.
    intptr_t PrevAllocated : 1;

    /// BlockSize - This is the size in bytes of this memory block,
    /// including this header.
    uintptr_t BlockSize : (sizeof(intptr_t)*8 - 2);


    /// getBlockAfter - Return the memory block immediately after this one.
    ///
    MemoryRangeHeader &getBlockAfter() const {
      return *(MemoryRangeHeader*)((char*)this+BlockSize);
    }

    /// getFreeBlockBefore - If the block before this one is free, return it,
    /// otherwise return null.
    FreeRangeHeader *getFreeBlockBefore() const {
      if (PrevAllocated) return 0;
      intptr_t PrevSize = ((intptr_t *)this)[-1];
      return (FreeRangeHeader*)((char*)this-PrevSize);
    }

    /// MakeFreeBlock - Turn an allocated block into a free block, adjusting
    /// bits in the object headers, and adding an end of region memory block.
    FreeRangeHeader &MakeFreeBlock(FreeRangeHeader *FreeList);

    /// TrimAllocationToSize - If this allocated block is significantly larger
    /// than NewSize, split it into two pieces (where the former is NewSize
    /// bytes, including the header), and add the new block to the free list.
    FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
                                          uint64_t NewSize);
  };

  /// FreeRangeHeader - For a memory block that isn't already allocated, this
  /// keeps track of the current block and has a pointer to the next free block.
  /// Free blocks are kept on a circularly linked list.
  struct FreeRangeHeader : public MemoryRangeHeader {
    FreeRangeHeader *Prev;
    FreeRangeHeader *Next;

    /// getMinBlockSize - Get the minimum size for a memory block. Blocks
    /// smaller than this size cannot be created.
    static unsigned getMinBlockSize() {
      return sizeof(FreeRangeHeader)+sizeof(intptr_t);
    }

    /// SetEndOfBlockSizeMarker - The word at the end of every free block is
    /// known to be the size of the free block. Set it for this block.
    void SetEndOfBlockSizeMarker() {
      void *EndOfBlock = (char*)this + BlockSize;
      ((intptr_t *)EndOfBlock)[-1] = BlockSize;
    }

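    /// RemoveFromFreeList - Unlink this block from the circular free list and
    /// return the next block on the list.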
    FreeRangeHeader *RemoveFromFreeList() {
      assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
      Next->Prev = Prev;
      return Prev->Next = Next;
    }

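    /// AddToFreeList - Insert this block into the circular free list,
    /// immediately before the specified FreeList entry.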
    void AddToFreeList(FreeRangeHeader *FreeList) {
      Next = FreeList;
      Prev = FreeList->Prev;
      Prev->Next = this;
      Next->Prev = this;
    }

    /// GrowBlock - The block after this block just got deallocated. Merge it
    /// into the current block.
    void GrowBlock(uintptr_t NewSize);

    /// AllocateBlock - Mark this entire block allocated, updating freelists
    /// etc. This returns a pointer to the circular free-list.
    FreeRangeHeader *AllocateBlock();
  };
}


/// AllocateBlock - Mark this entire block allocated, updating freelists
/// etc. This returns a pointer to the circular free-list.
FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
  assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
         "Cannot allocate an allocated block!");
  // Mark this block allocated.
  ThisAllocated = 1;
  getBlockAfter().PrevAllocated = 1;

  // Remove it from the free list.
  return RemoveFromFreeList();
}

/// MakeFreeBlock - Turn an allocated block into a free block, adjusting
/// bits in the object headers, and adding an end of region memory block.
/// If possible, coalesce this block with neighboring blocks. Return the
/// FreeRangeHeader this block ends up in, which may be != this if it got
/// coalesced.
FreeRangeHeader &MemoryRangeHeader::MakeFreeBlock(FreeRangeHeader *FreeList) {
  MemoryRangeHeader *FollowingBlock = &getBlockAfter();
  assert(ThisAllocated && "This block is already free!");
  assert(FollowingBlock->PrevAllocated && "Flags out of sync!");

  // If the block after this one is free, merge it into this block.
  if (!FollowingBlock->ThisAllocated) {
    FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
    FollowingFreeBlock.RemoveFromFreeList();

    // Include the following block into this one.
    BlockSize += FollowingFreeBlock.BlockSize;
    FollowingBlock = &FollowingFreeBlock.getBlockAfter();

    // Tell the block after the block we are coalescing that this block is
    // allocated.
    FollowingBlock->PrevAllocated = 1;
  }

  assert(FollowingBlock->ThisAllocated && "Missed coalescing?");

  if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
    PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
    return *PrevFreeBlock;
  }

  // Otherwise, mark this block free.
  FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
  FollowingBlock->PrevAllocated = 0;
  FreeBlock.ThisAllocated = 0;

  // Link this into the linked list of free blocks.
  FreeBlock.AddToFreeList(FreeList);

  // Add a marker at the end of the block, indicating the size of this free
  // block.
  FreeBlock.SetEndOfBlockSizeMarker();
  return FreeBlock;
}

/// GrowBlock - The block after this block just got deallocated. Merge it
/// into the current block.
void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
  assert(NewSize > BlockSize && "Not growing block?");
  BlockSize = NewSize;
  SetEndOfBlockSizeMarker();
}

/// TrimAllocationToSize - If this allocated block is significantly larger
/// than NewSize, split it into two pieces (where the former is NewSize
/// bytes, including the header), and add the new block to the free list.
FreeRangeHeader *MemoryRangeHeader::
TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
  assert(ThisAllocated && getBlockAfter().PrevAllocated &&
         "Cannot deallocate part of an allocated block!");

  // Round up size for alignment of header.
  unsigned HeaderAlign = __alignof(FreeRangeHeader);
  NewSize = (NewSize + (HeaderAlign-1)) & ~(HeaderAlign-1);

  // Size is now the size of the block we will remove from the start of the
  // current block.
  assert(NewSize <= BlockSize &&
         "Allocating more space from this block than exists!");

  // If splitting this block will cause the remainder to be too small, do not
  // split the block.
  if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
    return FreeList;

  // Otherwise, we splice the required number of bytes out of this block, form
  // a new block immediately after it, then mark this block allocated.
  MemoryRangeHeader &FormerNextBlock = getBlockAfter();

  // Change the size of this block.
  BlockSize = NewSize;

  // Get the new block we just sliced out and turn it into a free block.
  FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
  NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
  NewNextBlock.ThisAllocated = 0;
  NewNextBlock.PrevAllocated = 1;
  NewNextBlock.SetEndOfBlockSizeMarker();
  FormerNextBlock.PrevAllocated = 0;
  NewNextBlock.AddToFreeList(FreeList);
  return &NewNextBlock;
}


namespace {
  /// JITMemoryManager - Manage memory for the JIT code generation in a logical,
  /// sane way. This splits a large block of MAP_NORESERVE'd memory into two
  /// sections, one for function stubs, one for the functions themselves. We
  /// have to do this because we may need to emit a function stub while in the
  /// middle of emitting a function, and we don't know how large the function we
  /// are emitting is. This never bothers to release the memory, because when
  /// we are ready to destroy the JIT, the program exits.
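  ///
  /// A rough sketch of how JITEmitter (below) drives this manager; the names
  /// Start and EndOfEmittedCode are illustrative, not members of this class:
  ///   uintptr_t Size;
  ///   unsigned char *Start = MemMgr.startFunctionBody(Size);
  ///   // ... emit at most Size bytes of machine code starting at Start ...
  ///   MemMgr.endFunctionBody(F, Start, EndOfEmittedCode);
  /// Stubs are allocated separately with allocateStub().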
  class JITMemoryManager {
    std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
    FreeRangeHeader *FreeMemoryList;      // Circular list of free blocks.

    // When emitting code into a memory block, this is the block.
    MemoryRangeHeader *CurBlock;

    unsigned char *CurStubPtr, *StubBase;
    unsigned char *GOTBase;      // Target-specific reserved memory

    // Centralize memory block allocation.
    sys::MemoryBlock getNewMemoryBlock(unsigned size);

    std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
  public:
    JITMemoryManager(bool useGOT);
    ~JITMemoryManager();

    inline unsigned char *allocateStub(unsigned StubSize);

    /// startFunctionBody - When a function starts, allocate a block of free
    /// executable memory, returning a pointer to it and its actual size.
    unsigned char *startFunctionBody(uintptr_t &ActualSize) {
      CurBlock = FreeMemoryList;

      // Allocate the entire memory block.
      FreeMemoryList = FreeMemoryList->AllocateBlock();
      ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
      return (unsigned char *)(CurBlock+1);
    }

    /// endFunctionBody - The function F is now allocated, and takes the memory
    /// in the range [FunctionStart,FunctionEnd).
    void endFunctionBody(const Function *F, unsigned char *FunctionStart,
                         unsigned char *FunctionEnd) {
      assert(FunctionEnd > FunctionStart);
      assert(FunctionStart == (unsigned char *)(CurBlock+1) &&
             "Mismatched function start/end!");

      uintptr_t BlockSize = FunctionEnd - (unsigned char *)CurBlock;
      FunctionBlocks[F] = CurBlock;

      // Release the memory at the end of this block that isn't needed.
      FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
    }

    unsigned char *getGOTBase() const {
      return GOTBase;
    }
    bool isManagingGOT() const {
      return GOTBase != NULL;
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(const Function *F) {
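      // FIXME: Not implemented; as noted in the class comment above, this
      // manager currently never releases function memory.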
    }
  };
}

JITMemoryManager::JITMemoryManager(bool useGOT) {
  // Allocate a 16M block of memory for functions.
  sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);

  unsigned char *MemBase = reinterpret_cast<unsigned char*>(MemBlock.base());

  // Allocate stubs backwards from the base, allocate functions forward
  // from the base.
  StubBase = MemBase;
  CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.

  // We set up the memory chunk with 4 mem regions, like this:
  //  [ START
  //    [ Free      #0 ] -> Large space to allocate functions from.
  //    [ Allocated #1 ] -> Tiny space to separate regions.
  //    [ Free      #2 ] -> Tiny space so there is always at least 1 free block.
  //    [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
  //  END ]
  //
  // The last three blocks are never deallocated or touched.

  // Add MemoryRangeHeader to the end of the memory region, indicating that
  // the space after the block of memory is allocated. This is block #3.
  MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
  Mem3->ThisAllocated = 1;
  Mem3->PrevAllocated = 0;
  Mem3->BlockSize = 0;

  /// Add a tiny free region so that the free list always has one entry.
  FreeRangeHeader *Mem2 =
    (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
  Mem2->ThisAllocated = 0;
  Mem2->PrevAllocated = 1;
  Mem2->BlockSize = FreeRangeHeader::getMinBlockSize();
  Mem2->SetEndOfBlockSizeMarker();
  Mem2->Prev = Mem2;   // Mem2 *is* the free list for now.
  Mem2->Next = Mem2;

  /// Add a tiny allocated region so that Mem2 is never coalesced away.
  MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
  Mem1->ThisAllocated = 1;
  Mem1->PrevAllocated = 0;
  Mem1->BlockSize = (char*)Mem2 - (char*)Mem1;

  // Add a FreeRangeHeader to the start of the function body region, indicating
  // that the space is free. Mark the previous block allocated so we never look
  // at it.
  FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr;
  Mem0->ThisAllocated = 0;
  Mem0->PrevAllocated = 1;
  Mem0->BlockSize = (unsigned char*)Mem1-(unsigned char*)Mem0;
  Mem0->SetEndOfBlockSizeMarker();
  Mem0->AddToFreeList(Mem2);

  // Start out with the freelist pointing to Mem0.
  FreeMemoryList = Mem0;

  // Allocate the GOT.
  GOTBase = NULL;
  if (useGOT) GOTBase = (unsigned char*)malloc(sizeof(void*) * 8192);
}

JITMemoryManager::~JITMemoryManager() {
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
    sys::Memory::ReleaseRWX(Blocks[i]);
  Blocks.clear();
}

unsigned char *JITMemoryManager::allocateStub(unsigned StubSize) {
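  // Stubs are carved off the stub region downward, from CurStubPtr toward
  // StubBase.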
  CurStubPtr -= StubSize;
  if (CurStubPtr < StubBase) {
    // FIXME: allocate a new block
    std::cerr << "JIT ran out of memory for function stubs!\n";
    abort();
  }
  return CurStubPtr;
}

sys::MemoryBlock JITMemoryManager::getNewMemoryBlock(unsigned size) {
  try {
    // Allocate a new block close to the last one.
    const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front();
    sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld);
    Blocks.push_back(B);
    return B;
  } catch (std::string &err) {
    std::cerr << "Allocation failed when allocating new memory in the JIT\n";
    std::cerr << err << "\n";
    abort();
  }
}

//===----------------------------------------------------------------------===//
// JIT lazy compilation code.
//
namespace {
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

  public:
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// MCE - The MachineCodeEmitter to use to emit stubs with.
    MachineCodeEmitter &MCE;

    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    std::map<void*, void*> ExternalFnToStubMap;

    // Map addresses to indexes in the GOT.
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

  public:
    JITResolver(MachineCodeEmitter &mce) : MCE(mce), nextGOTIndex(0) {
      LazyResolverFn =
        TheJIT->getJITInfo().getLazyResolverFunction(JITCompilerFn);
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      /// Get the target-specific JIT resolver function.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)LazyResolverFn;
    }

    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
    /// an address. This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void* addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address. If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}

/// getJITResolver - This function returns the one instance of the JIT resolver.
///
static JITResolver &getJITResolver(MachineCodeEmitter *MCE = 0) {
  static JITResolver TheJITResolver(*MCE);
  return TheJITResolver;
}

/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this function, recycle it.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function unless we already KNOW it is an external
  // function, in which case we just skip the lazy resolution step.
  void *Actual = (void*)LazyResolverFn;
  if (F->isExternal() && F->hasExternalLinkage())
    Actual = TheJIT->getPointerToFunction(F);

  // Otherwise, codegen a new stub. For now, the stub will call the lazy
  // resolver function.
  Stub = TheJIT->getJITInfo().emitFunctionStub(Actual, MCE);

  if (Actual != (void*)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub << "] for function '"
                  << F->getName() << "'\n");

  // Finally, keep track of the stub-to-Function mapping so that the
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
  return Stub;
}

/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  Stub = TheJIT->getJITInfo().emitFunctionStub(FnAddr, MCE);
  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub
                  << "] for external function at '" << FnAddr << "'\n");
  return Stub;
}

unsigned JITResolver::getGOTIndexForAddr(void* addr) {
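  // Indices start at 1, so a zero result from the map lookup below means this
  // address does not have a GOT slot yet.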
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DEBUG(std::cerr << "Adding GOT entry " << idx
                    << " for addr " << addr << "\n");
    // ((void**)MemMgr.getGOTBase())[idx] = addr;
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered. It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = getJITResolver();

  MutexGuard locked(TheJIT->lock);

  // The address given to us for the stub may not be exactly right, it might be
  // a little bit after the stub. As such, use upper_bound to find it.
  std::map<void*, Function*>::iterator I =
    JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
  assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
         "This is not a known stub!");
  Function *F = (--I)->second;

  // We might like to remove the stub from the StubToFunction map.
  // We can't do that! Multiple threads could be stuck, waiting to acquire the
  // lock above. As soon as the 1st function finishes compiling the function,
  // the next one will be released, and needs to be able to find the function
  // it needs to call.
  //JR.state.getStubToFunctionMap(locked).erase(I);

  DEBUG(std::cerr << "JIT: Lazily resolving function '" << F->getName()
                  << "' In stub ptr = " << Stub << " actual ptr = "
                  << I->first << "\n");

  void *Result = TheJIT->getPointerToFunction(F);

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory.
  // Do this without allocating map space if the target isn't using a GOT.
  if (JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}


//===----------------------------------------------------------------------===//
// JITEmitter code.
//
namespace {
  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
  /// used to output functions to memory for execution.
  class JITEmitter : public MachineCodeEmitter {
    JITMemoryManager MemMgr;

    // When outputting a function stub in the context of some other function, we
    // save BufferBegin/BufferEnd/CurBufferPtr here.
    unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;

    /// Relocations - These are the relocations that the function needs, as
    /// emitted.
    std::vector<MachineRelocation> Relocations;

    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
    /// It is filled in by the StartMachineBasicBlock callback and queried by
    /// the getMachineBasicBlockAddress callback.
    std::vector<intptr_t> MBBLocations;

    /// ConstantPool - The constant pool for the current function.
    ///
    MachineConstantPool *ConstantPool;

    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
    ///
    void *ConstantPoolBase;

    /// JumpTable - The jump table info for the current function.
    ///
    MachineJumpTableInfo *JumpTable;

    /// JumpTableBase - A pointer to the first entry in the jump table.
    ///
    void *JumpTableBase;
  public:
    JITEmitter(JIT &jit) : MemMgr(jit.getJITInfo().needsGOT()) {
      TheJIT = &jit;
      DEBUG(if (MemMgr.isManagingGOT()) std::cerr << "JIT is managing a GOT\n");
    }

    virtual void startFunction(MachineFunction &F);
    virtual bool finishFunction(MachineFunction &F);

    void emitConstantPool(MachineConstantPool *MCP);
    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);

    virtual void startFunctionStub(unsigned StubSize);
    virtual void* finishFunctionStub(const Function *F);

    virtual void addRelocation(const MachineRelocation &MR) {
      Relocations.push_back(MR);
    }

    virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
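      // Grow the table with some slack (2x) so later blocks rarely force
      // another resize.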
      if (MBBLocations.size() <= (unsigned)MBB->getNumber())
        MBBLocations.resize((MBB->getNumber()+1)*2);
      MBBLocations[MBB->getNumber()] = getCurrentPCValue();
    }

    virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
    virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;

    virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
      assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
             MBBLocations[MBB->getNumber()] && "MBB not emitted!");
      return MBBLocations[MBB->getNumber()];
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(Function *F) {
      MemMgr.deallocateMemForFunction(F);
    }
  private:
    void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
  };
}

void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool DoesntNeedStub) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    /// FIXME: If we straightened things out, this could actually emit the
    /// global immediately instead of queuing it for codegen later!
    return TheJIT->getOrEmitGlobalVariable(GV);
  }

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);
  void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
  if (ResultPtr) return ResultPtr;

  if (F->hasExternalLinkage() && F->isExternal()) {
    // If this is an external function pointer, we can force the JIT to
    // 'compile' it, which really just adds it to the map.
    if (DoesntNeedStub)
      return TheJIT->getPointerToFunction(F);

    return getJITResolver(this).getFunctionStub(F);
  }

  // Okay, the function has not been compiled yet, if the target callback
  // mechanism is capable of rewriting the instruction directly, prefer to do
  // that instead of emitting a stub.
  if (DoesntNeedStub)
    return getJITResolver(this).AddCallbackAtLocation(F, Reference);

  // Otherwise, we have to emit a lazy resolving stub.
  return getJITResolver(this).getFunctionStub(F);
}

void JITEmitter::startFunction(MachineFunction &F) {
  uintptr_t ActualSize;
  BufferBegin = CurBufferPtr = MemMgr.startFunctionBody(ActualSize);
  BufferEnd = BufferBegin+ActualSize;

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}

bool JITEmitter::finishFunction(MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // FIXME: Allocate more space, then try again.
    std::cerr << "JIT: Ran out of space for generated machine code!\n";
    abort();
  }

  emitJumpTableInfo(F.getJumpTableInfo());

  MemMgr.endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  NumBytes += getCurrentPCOffset();

  if (!Relocations.empty()) {
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr;
      if (MR.isString()) {
        ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());

        // If the target REALLY wants a stub for this function, emit it now.
        if (!MR.doesntNeedFunctionStub())
          ResultPtr = getJITResolver(this).getExternalFunctionStub(ResultPtr);
      } else if (MR.isGlobalValue()) {
        ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                       BufferBegin+MR.getMachineCodeOffset(),
                                       MR.doesntNeedFunctionStub());
      } else {
        assert(MR.isConstantPoolIndex());
        ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
      }

      MR.setResultPointer(ResultPtr);

      // If we are managing the GOT and the relocation wants an index,
      // give it one.
      if (MemMgr.isManagingGOT() && MR.isGOTRelative()) {
        unsigned idx = getJITResolver(this).getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr.getGOTBase())[idx] != ResultPtr) {
          DEBUG(std::cerr << "GOT was out of date for " << ResultPtr
                          << " pointing at "
                          << ((void**)MemMgr.getGOTBase())[idx] << "\n");
          ((void**)MemMgr.getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr.getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if (MemMgr.isManagingGOT()) {
    unsigned idx = getJITResolver(this).getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr.getGOTBase())[idx] != (void*)BufferBegin) {
      DEBUG(std::cerr << "GOT was out of date for " << (void*)BufferBegin
                      << " pointing at "
                      << ((void**)MemMgr.getGOTBase())[idx] << "\n");
      ((void**)MemMgr.getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  DEBUG(std::cerr << "JIT: Finished CodeGen of [" << (void*)BufferBegin
                  << "] Function: " << F.getFunction()->getName()
                  << ": " << getCurrentPCOffset() << " bytes of text, "
                  << Relocations.size() << " relocations\n");
  Relocations.clear();
  return false;
}

void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return;

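  // The total pool size is the offset of the last entry plus the size of that
  // entry's type.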
  unsigned Size = Constants.back().Offset;
  Size += TheJIT->getTargetData()->getTypeSize(Constants.back().Val->getType());

  ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
  ConstantPool = MCP;

  if (ConstantPoolBase == 0) return;  // Buffer overflow.

  // Initialize the memory for all of the constant pool entries.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
    TheJIT->InitializeMemory(Constants[i].Val, CAddr);
  }
}

void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  unsigned NumEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize();

  // Just allocate space for all the jump tables now. We will fix up the actual
  // MBB entries in the tables after we emit the code for each block, since then
  // we will know the final locations of the MBBs in memory.
  JumpTable = MJTI;
  JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment());
}

void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty() || JumpTableBase == 0) return;

  unsigned Offset = 0;
  assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the address of
  // an emitted MachineBasicBlock.
  intptr_t *SlotPtr = (intptr_t*)JumpTableBase;

  for (unsigned i = 0, e = JT.size(); i != e; ++i) {
    const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'.
    for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
  }
}

void JITEmitter::startFunctionStub(unsigned StubSize) {
  SavedBufferBegin = BufferBegin;
  SavedBufferEnd = BufferEnd;
  SavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = MemMgr.allocateStub(StubSize);
  BufferEnd = BufferBegin+StubSize+1;
}

void *JITEmitter::finishFunctionStub(const Function *F) {
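  // Count the stub's bytes, then restore the buffer state that was saved by
  // startFunctionStub; the stub memory itself remains allocated.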
  NumBytes += getCurrentPCOffset();
  std::swap(SavedBufferBegin, BufferBegin);
  BufferEnd = SavedBufferEnd;
  CurBufferPtr = SavedCurBufferPtr;
  return SavedBufferBegin;
}

// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
// in the constant pool that was last emitted with the 'emitConstantPool'
// method.
//
intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
  assert(ConstantNum < ConstantPool->getConstants().size() &&
         "Invalid ConstantPoolIndex!");
  return (intptr_t)ConstantPoolBase +
         ConstantPool->getConstants()[ConstantNum].Offset;
}

// getJumpTableEntryAddress - Return the address of the jump table with index
// 'Index' in the jump table info that was last initialized with
// 'initJumpTableInfo'.
//
intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
  const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
  assert(Index < JT.size() && "Invalid jump table index!");

  unsigned Offset = 0;
  unsigned EntrySize = JumpTable->getEntrySize();

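  // Sum the entries of all jump tables that precede 'Index' to get the byte
  // offset of the requested table within JumpTableBase.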
  for (unsigned i = 0; i < Index; ++i)
    Offset += JT[i].MBBs.size() * EntrySize;

  return (intptr_t)((char *)JumpTableBase + Offset);
}

//===----------------------------------------------------------------------===//
//  Public interface to this file
//===----------------------------------------------------------------------===//

MachineCodeEmitter *JIT::createEmitter(JIT &jit) {
  return new JITEmitter(jit);
}

// getPointerToNamedFunction - This function is used as a global wrapper to
// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
// need to resolve function(s) that are being mis-codegenerated, so we need to
// resolve their addresses at runtime, and this is the way to do it.
extern "C" {
  void *getPointerToNamedFunction(const char *Name) {
    Module &M = TheJIT->getModule();
    if (Function *F = M.getNamedFunction(Name))
      return TheJIT->getPointerToFunction(F);
    return TheJIT->getPointerToNamedFunction(Name);
  }
}

// getPointerToFunctionOrStub - If the specified function has been
// code-gen'd, return a pointer to the function. If not, compile it, or use
// a stub to implement lazy compilation if available.
//
void *JIT::getPointerToFunctionOrStub(Function *F) {
  // If we have already code generated the function, just return the address.
  if (void *Addr = getPointerToGlobalIfAvailable(F))
    return Addr;

  // Get a stub if the target supports it.
  return getJITResolver(MCE).getFunctionStub(F);
}

/// freeMachineCodeForFunction - Release machine code memory for the given
/// Function.
///
void JIT::freeMachineCodeForFunction(Function *F) {
  // Delete translation for this from the ExecutionEngine, so it will get
  // retranslated next time it is used.
  updateGlobalMapping(F, 0);

  // Free the actual memory for the function body and related stuff.
  assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
  dynamic_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F);
}