Chris Lattner166f2262004-11-22 22:00:25 +00001//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
Misha Brukmanf976c852005-04-21 22:55:34 +00002//
John Criswellb576c942003-10-20 19:43:21 +00003// The LLVM Compiler Infrastructure
4//
5// This file was developed by the LLVM research group and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
Misha Brukmanf976c852005-04-21 22:55:34 +00007//
John Criswellb576c942003-10-20 19:43:21 +00008//===----------------------------------------------------------------------===//
Chris Lattnerbd199fb2002-12-24 00:01:05 +00009//
Chris Lattner5be478f2004-11-20 03:46:14 +000010// This file defines a MachineCodeEmitter object that is used by the JIT to
11// write machine code to memory and remember where relocatable values are.
Chris Lattnerbd199fb2002-12-24 00:01:05 +000012//
13//===----------------------------------------------------------------------===//
14
Chris Lattner3785fad2003-08-05 17:00:32 +000015#define DEBUG_TYPE "jit"
Chris Lattner4d326fa2003-12-20 01:46:27 +000016#include "JIT.h"
Chris Lattner2c0a6a12003-11-30 04:23:21 +000017#include "llvm/Constant.h"
18#include "llvm/Module.h"
Chris Lattner5b3a4552005-03-17 15:38:16 +000019#include "llvm/Type.h"
Chris Lattnerbd199fb2002-12-24 00:01:05 +000020#include "llvm/CodeGen/MachineCodeEmitter.h"
21#include "llvm/CodeGen/MachineFunction.h"
Chris Lattner1cc08382003-01-13 01:00:12 +000022#include "llvm/CodeGen/MachineConstantPool.h"
Nate Begeman37efe672006-04-22 18:53:45 +000023#include "llvm/CodeGen/MachineJumpTableInfo.h"
Chris Lattner5be478f2004-11-20 03:46:14 +000024#include "llvm/CodeGen/MachineRelocation.h"
Nate Begeman37efe672006-04-22 18:53:45 +000025#include "llvm/ExecutionEngine/GenericValue.h"
Chris Lattner1cc08382003-01-13 01:00:12 +000026#include "llvm/Target/TargetData.h"
Chris Lattner5be478f2004-11-20 03:46:14 +000027#include "llvm/Target/TargetJITInfo.h"
Jim Laskeyacd80ac2006-12-14 19:17:33 +000028#include "llvm/Target/TargetMachine.h"
Reid Spencer551ccae2004-09-01 22:55:40 +000029#include "llvm/Support/Debug.h"
Chris Lattnere7fd5532006-05-08 22:00:52 +000030#include "llvm/Support/MutexGuard.h"
Anton Korobeynikovfd58e6e2007-01-23 10:26:08 +000031#include "llvm/System/Disassembler.h"
Reid Spencer551ccae2004-09-01 22:55:40 +000032#include "llvm/ADT/Statistic.h"
Reid Spencer52b0ba62004-09-11 04:31:03 +000033#include "llvm/System/Memory.h"
Andrew Lenhartha00269b2005-07-29 23:40:16 +000034#include <algorithm>
Chris Lattnerc19aade2003-12-08 08:06:28 +000035using namespace llvm;
Brian Gaeked0fde302003-11-11 22:41:34 +000036
Chris Lattner36343732006-12-19 22:43:32 +000037STATISTIC(NumBytes, "Number of bytes of machine code compiled");
38STATISTIC(NumRelos, "Number of relocations applied");
39static JIT *TheJIT = 0;
Chris Lattner54266522004-11-20 23:57:07 +000040
41//===----------------------------------------------------------------------===//
42// JITMemoryManager code.
43//
44namespace {
Chris Lattnere993cc22006-05-11 23:08:08 +000045 /// MemoryRangeHeader - For a range of memory, this is the header that we put
46 /// on the block of memory. It is carefully crafted to be one word of memory.
47 /// Allocated blocks have just this header; freed blocks have a FreeRangeHeader,
48 /// which starts with this.
49 struct FreeRangeHeader;
50 struct MemoryRangeHeader {
51 /// ThisAllocated - This is true if this block is currently allocated. If
52 /// not, this can be converted to a FreeRangeHeader.
53 intptr_t ThisAllocated : 1;
54
55 /// PrevAllocated - Keep track of whether the block immediately before us is
56 /// allocated. If not, the word immediately before this header is the size
57 /// of the previous block.
58 intptr_t PrevAllocated : 1;
59
60 /// BlockSize - This is the size in bytes of this memory block,
61 /// including this header.
62 uintptr_t BlockSize : (sizeof(intptr_t)*8 - 2);
63
64
65 /// getBlockAfter - Return the memory block immediately after this one.
66 ///
67 MemoryRangeHeader &getBlockAfter() const {
68 return *(MemoryRangeHeader*)((char*)this+BlockSize);
69 }
70
71 /// getFreeBlockBefore - If the block before this one is free, return it;
72 /// otherwise return null.
73 FreeRangeHeader *getFreeBlockBefore() const {
74 if (PrevAllocated) return 0;
75 intptr_t PrevSize = ((intptr_t *)this)[-1];
76 return (FreeRangeHeader*)((char*)this-PrevSize);
77 }
78
Chris Lattner9f3d1ba2006-05-11 23:56:57 +000079 /// FreeBlock - Turn an allocated block into a free block, adjusting
Chris Lattnere993cc22006-05-11 23:08:08 +000080 /// bits in the object headers, and adding an end of region memory block.
Chris Lattner9f3d1ba2006-05-11 23:56:57 +000081 FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);
Chris Lattnere993cc22006-05-11 23:08:08 +000082
83 /// TrimAllocationToSize - If this allocated block is significantly larger
84 /// than NewSize, split it into two pieces (where the former is NewSize
85 /// bytes, including the header), and add the new block to the free list.
86 FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
87 uint64_t NewSize);
88 };
89
90 /// FreeRangeHeader - For a memory block that isn't already allocated, this
91 /// keeps track of the current block and has a pointer to the next free block.
92 /// Free blocks are kept on a circularly linked list.
93 struct FreeRangeHeader : public MemoryRangeHeader {
94 FreeRangeHeader *Prev;
95 FreeRangeHeader *Next;
96
97 /// getMinBlockSize - Get the minimum size for a memory block. Blocks
98 /// smaller than this size cannot be created.
99 static unsigned getMinBlockSize() {
100 return sizeof(FreeRangeHeader)+sizeof(intptr_t);
101 }
102
103 /// SetEndOfBlockSizeMarker - The word at the end of every free block is
104 /// known to be the size of the free block. Set it for this block.
105 void SetEndOfBlockSizeMarker() {
106 void *EndOfBlock = (char*)this + BlockSize;
107 ((intptr_t *)EndOfBlock)[-1] = BlockSize;
108 }
109
110 FreeRangeHeader *RemoveFromFreeList() {
111 assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
112 Next->Prev = Prev;
113 return Prev->Next = Next;
114 }
115
116 void AddToFreeList(FreeRangeHeader *FreeList) {
117 Next = FreeList;
118 Prev = FreeList->Prev;
119 Prev->Next = this;
120 Next->Prev = this;
121 }
122
123 /// GrowBlock - The block after this block just got deallocated. Merge it
124 /// into the current block.
125 void GrowBlock(uintptr_t NewSize);
126
127 /// AllocateBlock - Mark this entire block allocated, updating freelists
128 /// etc. This returns a pointer to the circular free-list.
129 FreeRangeHeader *AllocateBlock();
130 };
131}
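// The checks below are an illustrative sketch, not part of the original file:
// they restate the layout the two headers depend on. MemoryRangeHeader packs
// its three bit-fields into a single word, and FreeRangeHeader only adds the
// Prev/Next links, which is what makes the "size word stored just before a
// free block" trick in getFreeBlockBefore() possible.
static void CheckRangeHeaderLayoutSketch() {
  assert(sizeof(MemoryRangeHeader) == sizeof(intptr_t) &&
         "Allocated-block header must occupy exactly one word");
  assert(sizeof(FreeRangeHeader) ==
         sizeof(MemoryRangeHeader) + 2*sizeof(FreeRangeHeader*) &&
         "Free-block header must add only the two list pointers");
}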
132
133
134/// AllocateBlock - Mark this entire block allocated, updating freelists
135/// etc. This returns a pointer to the circular free-list.
136FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
137 assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
138 "Cannot allocate an allocated block!");
139 // Mark this block allocated.
140 ThisAllocated = 1;
141 getBlockAfter().PrevAllocated = 1;
142
143 // Remove it from the free list.
144 return RemoveFromFreeList();
145}
146
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000147/// FreeBlock - Turn an allocated block into a free block, adjusting
Chris Lattnere993cc22006-05-11 23:08:08 +0000148/// bits in the object headers, and adding an end of region memory block.
149/// If possible, coalesce this block with neighboring blocks. Return the
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000150/// FreeRangeHeader to allocate from.
151FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
Chris Lattnere993cc22006-05-11 23:08:08 +0000152 MemoryRangeHeader *FollowingBlock = &getBlockAfter();
153 assert(ThisAllocated && "This block is already allocated!");
154 assert(FollowingBlock->PrevAllocated && "Flags out of sync!");
155
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000156 FreeRangeHeader *FreeListToReturn = FreeList;
157
Chris Lattnere993cc22006-05-11 23:08:08 +0000158 // If the block after this one is free, merge it into this block.
159 if (!FollowingBlock->ThisAllocated) {
160 FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000161 // "FreeList" always needs to be a valid free block. If we're about to
162 // coallesce with it, update our notion of what the free list is.
163 if (&FollowingFreeBlock == FreeList) {
164 FreeList = FollowingFreeBlock.Next;
165 FreeListToReturn = 0;
166 assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
167 }
Chris Lattnere993cc22006-05-11 23:08:08 +0000168 FollowingFreeBlock.RemoveFromFreeList();
169
170 // Include the following block into this one.
171 BlockSize += FollowingFreeBlock.BlockSize;
172 FollowingBlock = &FollowingFreeBlock.getBlockAfter();
173
174 // Tell the block after the block we are coallescing that this block is
175 // allocated.
176 FollowingBlock->PrevAllocated = 1;
177 }
178
179 assert(FollowingBlock->ThisAllocated && "Missed coalescing?");
180
181 if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
182 PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000183 return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
Chris Lattnere993cc22006-05-11 23:08:08 +0000184 }
185
186 // Otherwise, mark this block free.
187 FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
188 FollowingBlock->PrevAllocated = 0;
189 FreeBlock.ThisAllocated = 0;
190
191 // Link this into the linked list of free blocks.
192 FreeBlock.AddToFreeList(FreeList);
193
194 // Add a marker at the end of the block, indicating the size of this free
195 // block.
196 FreeBlock.SetEndOfBlockSizeMarker();
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000197 return FreeListToReturn ? FreeListToReturn : &FreeBlock;
Chris Lattnere993cc22006-05-11 23:08:08 +0000198}
199
200/// GrowBlock - The block after this block just got deallocated. Merge it
201/// into the current block.
202void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
203 assert(NewSize > BlockSize && "Not growing block?");
204 BlockSize = NewSize;
205 SetEndOfBlockSizeMarker();
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000206 getBlockAfter().PrevAllocated = 0;
Chris Lattnere993cc22006-05-11 23:08:08 +0000207}
208
209/// TrimAllocationToSize - If this allocated block is significantly larger
210/// than NewSize, split it into two pieces (where the former is NewSize
211/// bytes, including the header), and add the new block to the free list.
212FreeRangeHeader *MemoryRangeHeader::
213TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
214 assert(ThisAllocated && getBlockAfter().PrevAllocated &&
215 "Cannot deallocate part of an allocated block!");
216
217 // Round up size for alignment of header.
218 unsigned HeaderAlign = __alignof(FreeRangeHeader);
219 NewSize = (NewSize+ (HeaderAlign-1)) & ~(HeaderAlign-1);
220
221 // Size is now the size of the block we will remove from the start of the
222 // current block.
223 assert(NewSize <= BlockSize &&
224 "Allocating more space from this block than exists!");
225
226 // If splitting this block will cause the remainder to be too small, do not
227 // split the block.
228 if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
229 return FreeList;
230
231 // Otherwise, we splice the required number of bytes out of this block, form
232 // a new block immediately after it, then mark this block allocated.
233 MemoryRangeHeader &FormerNextBlock = getBlockAfter();
234
235 // Change the size of this block.
236 BlockSize = NewSize;
237
238 // Get the new block we just sliced out and turn it into a free block.
239 FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
240 NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
241 NewNextBlock.ThisAllocated = 0;
242 NewNextBlock.PrevAllocated = 1;
243 NewNextBlock.SetEndOfBlockSizeMarker();
244 FormerNextBlock.PrevAllocated = 0;
245 NewNextBlock.AddToFreeList(FreeList);
246 return &NewNextBlock;
247}
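// Illustrative helper (an assumption-laden sketch, not original code) showing
// the round-up used by TrimAllocationToSize above: a requested size is padded
// to the alignment of FreeRangeHeader before deciding whether the tail of the
// block is worth splitting off.
static uint64_t RoundUpToHeaderAlignSketch(uint64_t Size) {
  unsigned HeaderAlign = __alignof(FreeRangeHeader);
  return (Size + (HeaderAlign - 1)) & ~(uint64_t)(HeaderAlign - 1);
}
// For example, with an 8-byte header alignment a 61-byte request becomes 64
// bytes, and the block is split only if more than
// FreeRangeHeader::getMinBlockSize() bytes would remain after those 64.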
248
249
250namespace {
Chris Lattner688506d2003-08-14 18:35:27 +0000251 /// JITMemoryManager - Manage memory for the JIT code generation in a logical,
252 /// sane way. This splits a large block of MAP_NORESERVE'd memory into two
253 /// sections, one for function stubs, one for the functions themselves. We
254 /// have to do this because we may need to emit a function stub while in the
255 /// middle of emitting a function, and we don't know how large the function we
256 /// are emitting is. This never bothers to release the memory, because when
257 /// we are ready to destroy the JIT, the program exits.
258 class JITMemoryManager {
Chris Lattnere6fdcbf2006-05-03 00:54:49 +0000259 std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
Chris Lattnere993cc22006-05-11 23:08:08 +0000260 FreeRangeHeader *FreeMemoryList; // Circular list of free blocks.
261
262 // When emitting code into a memory block, this is the block.
263 MemoryRangeHeader *CurBlock;
264
265 unsigned char *CurStubPtr, *StubBase;
Chris Lattnera726c7f2006-05-02 21:44:14 +0000266 unsigned char *GOTBase; // Target Specific reserved memory
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000267
Chris Lattnere993cc22006-05-11 23:08:08 +0000268 // Centralize memory block allocation.
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000269 sys::MemoryBlock getNewMemoryBlock(unsigned size);
Chris Lattnere993cc22006-05-11 23:08:08 +0000270
271 std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
Chris Lattner688506d2003-08-14 18:35:27 +0000272 public:
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000273 JITMemoryManager(bool useGOT);
Reid Spencer4af3da62004-12-13 16:04:04 +0000274 ~JITMemoryManager();
Misha Brukmanf976c852005-04-21 22:55:34 +0000275
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000276 inline unsigned char *allocateStub(unsigned StubSize, unsigned Alignment);
Chris Lattnere993cc22006-05-11 23:08:08 +0000277
278 /// startFunctionBody - When a function starts, allocate a block of free
279 /// executable memory, returning a pointer to it and its actual size.
280 unsigned char *startFunctionBody(uintptr_t &ActualSize) {
281 CurBlock = FreeMemoryList;
282
283 // Allocate the entire memory block.
284 FreeMemoryList = FreeMemoryList->AllocateBlock();
285 ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
286 return (unsigned char *)(CurBlock+1);
287 }
288
289 /// endFunctionBody - The function F is now allocated, and takes the memory
290 /// in the range [FunctionStart,FunctionEnd).
291 void endFunctionBody(const Function *F, unsigned char *FunctionStart,
292 unsigned char *FunctionEnd) {
293 assert(FunctionEnd > FunctionStart);
294 assert(FunctionStart == (unsigned char *)(CurBlock+1) &&
295 "Mismatched function start/end!");
296
297 uintptr_t BlockSize = FunctionEnd - (unsigned char *)CurBlock;
298 FunctionBlocks[F] = CurBlock;
299
300 // Release the memory at the end of this block that isn't needed.
301 FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
302 }
Chris Lattnera726c7f2006-05-02 21:44:14 +0000303
304 unsigned char *getGOTBase() const {
305 return GOTBase;
306 }
307 bool isManagingGOT() const {
308 return GOTBase != NULL;
309 }
Chris Lattnere993cc22006-05-11 23:08:08 +0000310
311 /// deallocateMemForFunction - Deallocate all memory for the specified
312 /// function body.
313 void deallocateMemForFunction(const Function *F) {
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000314 std::map<const Function*, MemoryRangeHeader*>::iterator
315 I = FunctionBlocks.find(F);
316 if (I == FunctionBlocks.end()) return;
Chris Lattnere993cc22006-05-11 23:08:08 +0000317
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000318 // Find the block that is allocated for this function.
319 MemoryRangeHeader *MemRange = I->second;
320 assert(MemRange->ThisAllocated && "Block isn't allocated!");
321
Chris Lattnera5f04192006-05-12 00:03:12 +0000322 // Fill the buffer with garbage!
323 DEBUG(memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange)));
324
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000325 // Free the memory.
326 FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);
327
328 // Finally, remove this entry from FunctionBlocks.
329 FunctionBlocks.erase(I);
Chris Lattnere993cc22006-05-11 23:08:08 +0000330 }
Chris Lattner688506d2003-08-14 18:35:27 +0000331 };
332}
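// A minimal usage sketch of the manager's function-body protocol (hypothetical
// driver code; the real caller is JITEmitter below): the emitter claims the
// whole current free block up front, emits into it, and returns the unused
// tail when the function is finished.
static unsigned char *EmitFunctionBodySketch(JITMemoryManager &MemMgr,
                                             const Function *F,
                                             uintptr_t BytesEmitted) {
  uintptr_t ActualSize = 0;
  unsigned char *Start = MemMgr.startFunctionBody(ActualSize);
  assert(BytesEmitted && BytesEmitted <= ActualSize &&
         "Sketch ignores the out-of-space case handled in finishFunction");
  unsigned char *End = Start + BytesEmitted;  // pretend this much was emitted
  MemMgr.endFunctionBody(F, Start, End);      // give the tail back to the free list
  return Start;
}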
333
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000334JITMemoryManager::JITMemoryManager(bool useGOT) {
Chris Lattnere993cc22006-05-11 23:08:08 +0000335 // Allocate a 16M block of memory for functions.
336 sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000337
Chris Lattnere993cc22006-05-11 23:08:08 +0000338 unsigned char *MemBase = reinterpret_cast<unsigned char*>(MemBlock.base());
Chris Lattner281a6012005-01-10 18:23:22 +0000339
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000340 // Allocate stubs backwards from the base, allocate functions forward
341 // from the base.
Chris Lattnere993cc22006-05-11 23:08:08 +0000342 StubBase = MemBase;
343 CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.
344
345 // We set up the memory chunk with 4 mem regions, like this:
346 // [ START
347 // [ Free #0 ] -> Large space to allocate functions from.
348 // [ Allocated #1 ] -> Tiny space to separate regions.
349 // [ Free #2 ] -> Tiny space so there is always at least 1 free block.
350 // [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
351 // END ]
352 //
353 // The last three blocks are never deallocated or touched.
354
355 // Add MemoryRangeHeader to the end of the memory region, indicating that
356 // the space after the block of memory is allocated. This is block #3.
357 MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
358 Mem3->ThisAllocated = 1;
359 Mem3->PrevAllocated = 0;
360 Mem3->BlockSize = 0;
361
362 /// Add a tiny free region so that the free list always has one entry.
363 FreeRangeHeader *Mem2 =
364 (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
365 Mem2->ThisAllocated = 0;
366 Mem2->PrevAllocated = 1;
367 Mem2->BlockSize = FreeRangeHeader::getMinBlockSize();
368 Mem2->SetEndOfBlockSizeMarker();
369 Mem2->Prev = Mem2; // Mem2 *is* the free list for now.
370 Mem2->Next = Mem2;
371
372 /// Add a tiny allocated region so that Mem2 is never coalesced away.
373 MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000374 Mem1->ThisAllocated = 1;
375 Mem1->PrevAllocated = 0;
376 Mem1->BlockSize = (char*)Mem2 - (char*)Mem1;
Chris Lattnere993cc22006-05-11 23:08:08 +0000377
378 // Add a FreeRangeHeader to the start of the function body region, indicating
379 // that the space is free. Mark the previous block allocated so we never look
380 // at it.
381 FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr;
382 Mem0->ThisAllocated = 0;
383 Mem0->PrevAllocated = 1;
Chris Lattner9f3d1ba2006-05-11 23:56:57 +0000384 Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
Chris Lattnere993cc22006-05-11 23:08:08 +0000385 Mem0->SetEndOfBlockSizeMarker();
386 Mem0->AddToFreeList(Mem2);
387
388 // Start out with the freelist pointing to Mem0.
389 FreeMemoryList = Mem0;
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000390
Chris Lattnerf5d438c2006-05-02 21:57:51 +0000391 // Allocate the GOT.
Andrew Lenharth2b3b89c2005-08-01 17:35:40 +0000392 GOTBase = NULL;
Chris Lattnerbbea1242006-05-12 18:10:12 +0000393 if (useGOT) GOTBase = new unsigned char[sizeof(void*) * 8192];
Chris Lattner688506d2003-08-14 18:35:27 +0000394}
395
Reid Spencer4af3da62004-12-13 16:04:04 +0000396JITMemoryManager::~JITMemoryManager() {
Chris Lattnere6fdcbf2006-05-03 00:54:49 +0000397 for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
398 sys::Memory::ReleaseRWX(Blocks[i]);
Chris Lattnerbbea1242006-05-12 18:10:12 +0000399
400 delete[] GOTBase;
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000401 Blocks.clear();
Reid Spencer4af3da62004-12-13 16:04:04 +0000402}
403
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000404unsigned char *JITMemoryManager::allocateStub(unsigned StubSize,
405 unsigned Alignment) {
Chris Lattner688506d2003-08-14 18:35:27 +0000406 CurStubPtr -= StubSize;
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000407 CurStubPtr = (unsigned char*)(((intptr_t)CurStubPtr) &
408 ~(intptr_t)(Alignment-1));
Chris Lattnere993cc22006-05-11 23:08:08 +0000409 if (CurStubPtr < StubBase) {
Chris Lattnera726c7f2006-05-02 21:44:14 +0000410 // FIXME: allocate a new block
Bill Wendling832171c2006-12-07 20:04:42 +0000411 cerr << "JIT ran out of memory for function stubs!\n";
Chris Lattner688506d2003-08-14 18:35:27 +0000412 abort();
413 }
414 return CurStubPtr;
415}
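// Sketch of the stub-allocation math above (illustration only): stubs are
// carved downward from CurStubPtr, and after subtracting the stub size the
// pointer is rounded *down* so the stub itself ends up aligned.
static unsigned char *CarveStubSketch(unsigned char *CurPtr, unsigned StubSize,
                                      unsigned Alignment) {
  CurPtr -= StubSize;                                        // grow downward
  return (unsigned char*)((intptr_t)CurPtr & ~(intptr_t)(Alignment - 1));
}
// e.g. starting 1000 bytes into the stub area (assuming a 16-byte-aligned
// base), a 25-byte stub with 16-byte alignment lands at offset 960
// (1000 - 25 = 975, rounded down to 960).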
416
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000417sys::MemoryBlock JITMemoryManager::getNewMemoryBlock(unsigned size) {
Chris Lattnerc1780d22006-07-07 17:31:41 +0000418 // Allocate a new block close to the last one.
419 const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front();
420 std::string ErrMsg;
421 sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg);
422 if (B.base() == 0) {
Bill Wendling832171c2006-12-07 20:04:42 +0000423 cerr << "Allocation failed when allocating new memory in the JIT\n";
424 cerr << ErrMsg << "\n";
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000425 abort();
426 }
Chris Lattnerc1780d22006-07-07 17:31:41 +0000427 Blocks.push_back(B);
428 return B;
Andrew Lenhartha00269b2005-07-29 23:40:16 +0000429}
430
Chris Lattner54266522004-11-20 23:57:07 +0000431//===----------------------------------------------------------------------===//
432// JIT lazy compilation code.
433//
434namespace {
Reid Spenceree448632005-07-12 15:51:55 +0000435 class JITResolverState {
436 private:
437 /// FunctionToStubMap - Keep track of the stub created for a particular
438 /// function so that we can reuse it if necessary.
439 std::map<Function*, void*> FunctionToStubMap;
440
441 /// StubToFunctionMap - Keep track of the function that each stub
442 /// corresponds to.
443 std::map<void*, Function*> StubToFunctionMap;
Jeff Cohen00b168892005-07-27 06:12:32 +0000444
Reid Spenceree448632005-07-12 15:51:55 +0000445 public:
446 std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
447 assert(locked.holds(TheJIT->lock));
448 return FunctionToStubMap;
449 }
Jeff Cohen00b168892005-07-27 06:12:32 +0000450
Reid Spenceree448632005-07-12 15:51:55 +0000451 std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
452 assert(locked.holds(TheJIT->lock));
453 return StubToFunctionMap;
454 }
455 };
Jeff Cohen00b168892005-07-27 06:12:32 +0000456
Chris Lattner54266522004-11-20 23:57:07 +0000457 /// JITResolver - Keep track of, and resolve, call sites for functions that
458 /// have not yet been compiled.
459 class JITResolver {
Chris Lattner5e225582004-11-21 03:37:42 +0000460 /// LazyResolverFn - The target lazy resolver function that we actually
461 /// rewrite instructions to use.
462 TargetJITInfo::LazyResolverFn LazyResolverFn;
463
Reid Spenceree448632005-07-12 15:51:55 +0000464 JITResolverState state;
Chris Lattner54266522004-11-20 23:57:07 +0000465
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000466 /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
467 /// external functions.
468 std::map<void*, void*> ExternalFnToStubMap;
Andrew Lenharth6a974612005-07-28 12:44:13 +0000469
470 //map addresses to indexes in the GOT
471 std::map<void*, unsigned> revGOTMap;
472 unsigned nextGOTIndex;
473
Chris Lattnere7484012007-02-24 02:57:03 +0000474 static JITResolver *TheJITResolver;
Chris Lattner54266522004-11-20 23:57:07 +0000475 public:
Chris Lattnere7484012007-02-24 02:57:03 +0000476 JITResolver(JIT &jit) : nextGOTIndex(0) {
477 TheJIT = &jit;
478
479 LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
480 assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
481 TheJITResolver = this;
482 }
483
484 ~JITResolver() {
485 TheJITResolver = 0;
Chris Lattner5e225582004-11-21 03:37:42 +0000486 }
Chris Lattner54266522004-11-20 23:57:07 +0000487
488 /// getFunctionStub - This returns a pointer to a function stub, creating
489 /// one on demand as needed.
490 void *getFunctionStub(Function *F);
491
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000492 /// getExternalFunctionStub - Return a stub for the function at the
493 /// specified address, created lazily on demand.
494 void *getExternalFunctionStub(void *FnAddr);
495
Chris Lattner5e225582004-11-21 03:37:42 +0000496 /// AddCallbackAtLocation - If the target is capable of rewriting an
497 /// instruction without the use of a stub, record the location of the use so
498 /// we know which function is being used at the location.
499 void *AddCallbackAtLocation(Function *F, void *Location) {
Reid Spenceree448632005-07-12 15:51:55 +0000500 MutexGuard locked(TheJIT->lock);
Chris Lattner5e225582004-11-21 03:37:42 +0000501 /// Get the target-specific JIT resolver function.
Reid Spenceree448632005-07-12 15:51:55 +0000502 state.getStubToFunctionMap(locked)[Location] = F;
Chris Lattner870286a2006-06-01 17:29:22 +0000503 return (void*)(intptr_t)LazyResolverFn;
Chris Lattner5e225582004-11-21 03:37:42 +0000504 }
505
Andrew Lenharth6a974612005-07-28 12:44:13 +0000506 /// getGOTIndexForAddr - Return a new or existing index in the GOT for
507 /// an address. This function only manages slots; it does not manage the
508 /// contents of the slots or the memory associated with the GOT.
509 unsigned getGOTIndexForAddr(void* addr);
510
Chris Lattner54266522004-11-20 23:57:07 +0000511 /// JITCompilerFn - This function is called to resolve a stub to a compiled
512 /// address. If the LLVM Function corresponding to the stub has not yet
513 /// been compiled, this function compiles it first.
514 static void *JITCompilerFn(void *Stub);
515 };
516}
517
Chris Lattnere7484012007-02-24 02:57:03 +0000518JITResolver *JITResolver::TheJITResolver = 0;
Chris Lattner54266522004-11-20 23:57:07 +0000519
Evan Cheng55b50532006-07-27 06:33:55 +0000520#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
521 defined(__APPLE__)
522extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
523#endif
524
525/// synchronizeICache - On some targets, the JIT emitted code must be
526/// explicitly refetched to ensure correct execution.
527static void synchronizeICache(const void *Addr, size_t len) {
528#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
529 defined(__APPLE__)
Jim Laskey2e9f3682006-07-27 13:40:34 +0000530 sys_icache_invalidate(Addr, len);
Evan Cheng55b50532006-07-27 06:33:55 +0000531#endif
532}
533
Chris Lattner54266522004-11-20 23:57:07 +0000534/// getFunctionStub - This returns a pointer to a function stub, creating
535/// one on demand as needed.
536void *JITResolver::getFunctionStub(Function *F) {
Reid Spenceree448632005-07-12 15:51:55 +0000537 MutexGuard locked(TheJIT->lock);
538
Chris Lattner54266522004-11-20 23:57:07 +0000539 // If we already have a stub for this function, recycle it.
Reid Spenceree448632005-07-12 15:51:55 +0000540 void *&Stub = state.getFunctionToStubMap(locked)[F];
Chris Lattner54266522004-11-20 23:57:07 +0000541 if (Stub) return Stub;
542
Chris Lattnerb43dbdc2004-11-22 07:24:43 +0000543 // Call the lazy resolver function unless we already KNOW it is an external
544 // function, in which case we just skip the lazy resolution step.
Chris Lattner870286a2006-06-01 17:29:22 +0000545 void *Actual = (void*)(intptr_t)LazyResolverFn;
Reid Spencer5cbf9852007-01-30 20:08:39 +0000546 if (F->isDeclaration() && !F->hasNotBeenReadFromBytecode())
Chris Lattnerb43dbdc2004-11-22 07:24:43 +0000547 Actual = TheJIT->getPointerToFunction(F);
Misha Brukmanf976c852005-04-21 22:55:34 +0000548
Chris Lattner54266522004-11-20 23:57:07 +0000549 // Otherwise, codegen a new stub. For now, the stub will call the lazy
550 // resolver function.
Chris Lattnere7484012007-02-24 02:57:03 +0000551 Stub = TheJIT->getJITInfo().emitFunctionStub(Actual,
552 *TheJIT->getCodeEmitter());
Chris Lattnerb43dbdc2004-11-22 07:24:43 +0000553
Chris Lattner870286a2006-06-01 17:29:22 +0000554 if (Actual != (void*)(intptr_t)LazyResolverFn) {
Chris Lattnerb43dbdc2004-11-22 07:24:43 +0000555 // If we are getting the stub for an external function, we really want the
556 // address of the stub in the GlobalAddressMap for the JIT, not the address
557 // of the external function.
558 TheJIT->updateGlobalMapping(F, Stub);
559 }
Chris Lattner54266522004-11-20 23:57:07 +0000560
Evan Cheng55fc2802006-07-25 20:40:54 +0000561 // Invalidate the icache if necessary.
Chris Lattnere7484012007-02-24 02:57:03 +0000562 synchronizeICache(Stub, TheJIT->getCodeEmitter()->getCurrentPCValue() -
563 (intptr_t)Stub);
Evan Cheng55fc2802006-07-25 20:40:54 +0000564
Bill Wendling832171c2006-12-07 20:04:42 +0000565 DOUT << "JIT: Stub emitted at [" << Stub << "] for function '"
566 << F->getName() << "'\n";
Chris Lattnercb479412004-11-21 03:44:32 +0000567
Chris Lattner54266522004-11-20 23:57:07 +0000568 // Finally, keep track of the stub-to-Function mapping so that the
569 // JITCompilerFn knows which function to compile!
Reid Spenceree448632005-07-12 15:51:55 +0000570 state.getStubToFunctionMap(locked)[Stub] = F;
Chris Lattner54266522004-11-20 23:57:07 +0000571 return Stub;
572}
573
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000574/// getExternalFunctionStub - Return a stub for the function at the
575/// specified address, created lazily on demand.
576void *JITResolver::getExternalFunctionStub(void *FnAddr) {
577 // If we already have a stub for this function, recycle it.
578 void *&Stub = ExternalFnToStubMap[FnAddr];
579 if (Stub) return Stub;
580
Chris Lattnere7484012007-02-24 02:57:03 +0000581 Stub = TheJIT->getJITInfo().emitFunctionStub(FnAddr,
582 *TheJIT->getCodeEmitter());
Evan Cheng55fc2802006-07-25 20:40:54 +0000583
584 // Invalidate the icache if necessary.
Chris Lattnere7484012007-02-24 02:57:03 +0000585 synchronizeICache(Stub, TheJIT->getCodeEmitter()->getCurrentPCValue() -
586 (intptr_t)Stub);
Evan Cheng55fc2802006-07-25 20:40:54 +0000587
Bill Wendling832171c2006-12-07 20:04:42 +0000588 DOUT << "JIT: Stub emitted at [" << Stub
589 << "] for external function at '" << FnAddr << "'\n";
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000590 return Stub;
591}
592
Andrew Lenharth6a974612005-07-28 12:44:13 +0000593unsigned JITResolver::getGOTIndexForAddr(void* addr) {
594 unsigned idx = revGOTMap[addr];
595 if (!idx) {
596 idx = ++nextGOTIndex;
597 revGOTMap[addr] = idx;
Bill Wendling832171c2006-12-07 20:04:42 +0000598 DOUT << "Adding GOT entry " << idx
599 << " for addr " << addr << "\n";
Andrew Lenharth6a974612005-07-28 12:44:13 +0000600 // ((void**)MemMgr.getGOTBase())[idx] = addr;
601 }
602 return idx;
603}
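// Equivalent sketch of the bookkeeping above (illustrative, with hypothetical
// parameter names): because the reverse map default-constructs missing values
// to zero, index 0 doubles as "no slot assigned yet" and real GOT indices
// start at 1.
static unsigned GetOrCreateGOTIndexSketch(std::map<void*, unsigned> &RevGOT,
                                          unsigned &NextGOTIndex, void *Addr) {
  unsigned &Idx = RevGOT[Addr];     // 0 on the first lookup of this address
  if (!Idx)
    Idx = ++NextGOTIndex;           // hand out the next unused slot
  return Idx;
}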
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000604
Chris Lattner54266522004-11-20 23:57:07 +0000605/// JITCompilerFn - This function is called when a lazy compilation stub has
606/// been entered. It looks up which function this stub corresponds to, compiles
607/// it if necessary, then returns the resultant function pointer.
608void *JITResolver::JITCompilerFn(void *Stub) {
Chris Lattnere7484012007-02-24 02:57:03 +0000609 JITResolver &JR = *TheJITResolver;
Misha Brukmanf976c852005-04-21 22:55:34 +0000610
Reid Spenceree448632005-07-12 15:51:55 +0000611 MutexGuard locked(TheJIT->lock);
612
Chris Lattner54266522004-11-20 23:57:07 +0000613 // The address given to us for the stub may not be exactly right; it might be
614 // a little bit after the stub. As such, use upper_bound to find it.
615 std::map<void*, Function*>::iterator I =
Reid Spenceree448632005-07-12 15:51:55 +0000616 JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
Chris Lattner21998772006-01-07 06:20:51 +0000617 assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
618 "This is not a known stub!");
Chris Lattner54266522004-11-20 23:57:07 +0000619 Function *F = (--I)->second;
620
Evan Cheng9da60f92007-06-30 00:10:37 +0000621 // If we have already code-generated the function, just return the address.
622 void *Result = TheJIT->getPointerToGlobalIfAvailable(F);
Chris Lattner9cab56d2006-11-09 19:32:13 +0000623
Evan Cheng9da60f92007-06-30 00:10:37 +0000624 if (!Result) {
625 // Otherwise we don't have it, do lazy compilation now.
626
627 // If lazy compilation is disabled, emit a useful error message and abort.
628 if (TheJIT->isLazyCompilationDisabled()) {
629 cerr << "LLVM JIT requested to do lazy compilation of function '"
630 << F->getName() << "' when lazy compiles are disabled!\n";
631 abort();
632 }
633
634 // We might like to remove the stub from the StubToFunction map.
635 // We can't do that! Multiple threads could be stuck waiting to acquire the
636 // lock above. As soon as the first thread finishes compiling the function,
637 // the waiting ones will be released and need to be able to find the
638 // function they intend to call.
639 //JR.state.getStubToFunctionMap(locked).erase(I);
Chris Lattner54266522004-11-20 23:57:07 +0000640
Evan Cheng9da60f92007-06-30 00:10:37 +0000641 DOUT << "JIT: Lazily resolving function '" << F->getName()
642 << "' in stub ptr = " << Stub << " actual ptr = "
643 << I->first << "\n";
Chris Lattner54266522004-11-20 23:57:07 +0000644
Evan Cheng9da60f92007-06-30 00:10:37 +0000645 Result = TheJIT->getPointerToFunction(F);
646 }
Chris Lattner54266522004-11-20 23:57:07 +0000647
648 // We don't need to reuse this stub in the future, as F is now compiled.
Reid Spenceree448632005-07-12 15:51:55 +0000649 JR.state.getFunctionToStubMap(locked).erase(F);
Chris Lattner54266522004-11-20 23:57:07 +0000650
651 // FIXME: We could rewrite all references to this stub if we knew them.
Andrew Lenharth6a974612005-07-28 12:44:13 +0000652
Jeff Cohend29b6aa2005-07-30 18:33:25 +0000653 // What we will do is set the compiled function address to map to the
654 // same GOT entry as the stub so that later clients may update the GOT
Andrew Lenharth6a974612005-07-28 12:44:13 +0000655 // if they see it still using the stub address.
656 // Note: this is done so the Resolver doesn't have to manage GOT memory
657 // Do this without allocating map space if the target isn't using a GOT
658 if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
659 JR.revGOTMap[Result] = JR.revGOTMap[Stub];
660
Chris Lattner54266522004-11-20 23:57:07 +0000661 return Result;
662}
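// A condensed sketch (hypothetical helper, not in the original source) of the
// upper_bound lookup JITCompilerFn performs above: the address handed to the
// callback points a few bytes past the start of its stub, so the owning stub
// is the greatest map key less than or equal to that address.
static Function *LookupStubOwnerSketch(std::map<void*, Function*> &StubMap,
                                       void *AddrInsideStub) {
  std::map<void*, Function*>::iterator I = StubMap.upper_bound(AddrInsideStub);
  assert(I != StubMap.begin() && "Address precedes the first known stub!");
  return (--I)->second;   // predecessor entry covers this address
}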
Chris Lattner688506d2003-08-14 18:35:27 +0000663
664
Chris Lattner54266522004-11-20 23:57:07 +0000665//===----------------------------------------------------------------------===//
Chris Lattner166f2262004-11-22 22:00:25 +0000666// JITEmitter code.
Chris Lattner54266522004-11-20 23:57:07 +0000667//
Chris Lattner688506d2003-08-14 18:35:27 +0000668namespace {
Chris Lattner166f2262004-11-22 22:00:25 +0000669 /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
670 /// used to output functions to memory for execution.
671 class JITEmitter : public MachineCodeEmitter {
Chris Lattner688506d2003-08-14 18:35:27 +0000672 JITMemoryManager MemMgr;
673
Chris Lattner6125fdd2003-05-09 03:30:07 +0000674 // When outputting a function stub in the context of some other function, we
Chris Lattner43b429b2006-05-02 18:27:26 +0000675 // save BufferBegin/BufferEnd/CurBufferPtr here.
676 unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
Chris Lattnerbba1b6d2003-06-01 23:24:36 +0000677
Chris Lattner5be478f2004-11-20 03:46:14 +0000678 /// Relocations - These are the relocations that the function needs, as
679 /// emitted.
680 std::vector<MachineRelocation> Relocations;
Chris Lattnerb4432f32006-05-03 17:10:41 +0000681
682 /// MBBLocations - This vector is a mapping from MBB IDs to their addresses.
683 /// It is filled in by the StartMachineBasicBlock callback and queried by
684 /// the getMachineBasicBlockAddress callback.
685 std::vector<intptr_t> MBBLocations;
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000686
Chris Lattner239862c2006-02-09 04:49:59 +0000687 /// ConstantPool - The constant pool for the current function.
688 ///
689 MachineConstantPool *ConstantPool;
690
691 /// ConstantPoolBase - A pointer to the first entry in the constant pool.
692 ///
693 void *ConstantPoolBase;
Nate Begeman37efe672006-04-22 18:53:45 +0000694
Nate Begeman019f8512006-09-10 23:03:44 +0000695 /// JumpTable - The jump tables for the current function.
Nate Begeman37efe672006-04-22 18:53:45 +0000696 ///
697 MachineJumpTableInfo *JumpTable;
698
699 /// JumpTableBase - A pointer to the first entry in the jump table.
700 ///
701 void *JumpTableBase;
Chris Lattnere7484012007-02-24 02:57:03 +0000702
703 /// Resolver - This contains info about the currently resolved functions.
704 JITResolver Resolver;
705 public:
706 JITEmitter(JIT &jit)
707 : MemMgr(jit.getJITInfo().needsGOT()), Resolver(jit) {
Bill Wendling832171c2006-12-07 20:04:42 +0000708 if (MemMgr.isManagingGOT()) DOUT << "JIT is managing a GOT\n";
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000709 }
Chris Lattnere7484012007-02-24 02:57:03 +0000710
711 JITResolver &getJITResolver() { return Resolver; }
Chris Lattnerbd199fb2002-12-24 00:01:05 +0000712
713 virtual void startFunction(MachineFunction &F);
Chris Lattner43b429b2006-05-02 18:27:26 +0000714 virtual bool finishFunction(MachineFunction &F);
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000715
716 void emitConstantPool(MachineConstantPool *MCP);
717 void initJumpTableInfo(MachineJumpTableInfo *MJTI);
Jim Laskeyb92767a2006-12-14 22:53:42 +0000718 void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000719
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000720 virtual void startFunctionStub(unsigned StubSize, unsigned Alignment = 1);
Chris Lattner54266522004-11-20 23:57:07 +0000721 virtual void* finishFunctionStub(const Function *F);
Chris Lattnerbba1b6d2003-06-01 23:24:36 +0000722
Chris Lattner5be478f2004-11-20 03:46:14 +0000723 virtual void addRelocation(const MachineRelocation &MR) {
724 Relocations.push_back(MR);
725 }
Chris Lattnerb4432f32006-05-03 17:10:41 +0000726
727 virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
728 if (MBBLocations.size() <= (unsigned)MBB->getNumber())
729 MBBLocations.resize((MBB->getNumber()+1)*2);
730 MBBLocations[MBB->getNumber()] = getCurrentPCValue();
731 }
Chris Lattner5be478f2004-11-20 03:46:14 +0000732
Chris Lattnerb4432f32006-05-03 17:10:41 +0000733 virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
734 virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;
735
736 virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
737 assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
738 MBBLocations[MBB->getNumber()] && "MBB not emitted!");
739 return MBBLocations[MBB->getNumber()];
740 }
741
Chris Lattnere993cc22006-05-11 23:08:08 +0000742 /// deallocateMemForFunction - Deallocate all memory for the specified
743 /// function body.
744 void deallocateMemForFunction(Function *F) {
745 MemMgr.deallocateMemForFunction(F);
746 }
Chris Lattner54266522004-11-20 23:57:07 +0000747 private:
Chris Lattner5e225582004-11-21 03:37:42 +0000748 void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
Chris Lattnerbd199fb2002-12-24 00:01:05 +0000749 };
750}
751
Chris Lattner166f2262004-11-22 22:00:25 +0000752void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
753 bool DoesntNeedStub) {
Chris Lattner54266522004-11-20 23:57:07 +0000754 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
755 /// FIXME: If we straightened things out, this could actually emit the
756 /// global immediately instead of queuing it for codegen later!
Chris Lattner54266522004-11-20 23:57:07 +0000757 return TheJIT->getOrEmitGlobalVariable(GV);
758 }
759
760 // If we have already compiled the function, return a pointer to its body.
761 Function *F = cast<Function>(V);
762 void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
763 if (ResultPtr) return ResultPtr;
764
Reid Spencer5cbf9852007-01-30 20:08:39 +0000765 if (F->isDeclaration() && !F->hasNotBeenReadFromBytecode()) {
Chris Lattner54266522004-11-20 23:57:07 +0000766 // If this is an external function pointer, we can force the JIT to
767 // 'compile' it, which really just adds it to the map.
Chris Lattnerb43dbdc2004-11-22 07:24:43 +0000768 if (DoesntNeedStub)
769 return TheJIT->getPointerToFunction(F);
770
Chris Lattnere7484012007-02-24 02:57:03 +0000771 return Resolver.getFunctionStub(F);
Chris Lattner54266522004-11-20 23:57:07 +0000772 }
773
Chris Lattner5e225582004-11-21 03:37:42 +0000774 // Okay, the function has not been compiled yet. If the target callback
775 // mechanism is capable of rewriting the instruction directly, prefer to do
776 // that instead of emitting a stub.
777 if (DoesntNeedStub)
Chris Lattnere7484012007-02-24 02:57:03 +0000778 return Resolver.AddCallbackAtLocation(F, Reference);
Chris Lattner5e225582004-11-21 03:37:42 +0000779
Chris Lattner54266522004-11-20 23:57:07 +0000780 // Otherwise, we have to emit a lazy resolving stub.
Chris Lattnere7484012007-02-24 02:57:03 +0000781 return Resolver.getFunctionStub(F);
Chris Lattner54266522004-11-20 23:57:07 +0000782}
783
Chris Lattner166f2262004-11-22 22:00:25 +0000784void JITEmitter::startFunction(MachineFunction &F) {
Chris Lattnere993cc22006-05-11 23:08:08 +0000785 uintptr_t ActualSize;
786 BufferBegin = CurBufferPtr = MemMgr.startFunctionBody(ActualSize);
787 BufferEnd = BufferBegin+ActualSize;
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000788
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000789 // Ensure the constant pool/jump table info is at least 16-byte aligned.
790 emitAlignment(16);
791
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000792 emitConstantPool(F.getConstantPool());
793 initJumpTableInfo(F.getJumpTableInfo());
794
795 // About to start emitting the machine code for the function.
Chris Lattner0eb4d6b2006-05-03 01:03:20 +0000796 emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000797 TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);
Evan Cheng55fc2802006-07-25 20:40:54 +0000798
Chris Lattnerb4432f32006-05-03 17:10:41 +0000799 MBBLocations.clear();
Chris Lattnerbd199fb2002-12-24 00:01:05 +0000800}
801
Chris Lattner43b429b2006-05-02 18:27:26 +0000802bool JITEmitter::finishFunction(MachineFunction &F) {
Chris Lattnere993cc22006-05-11 23:08:08 +0000803 if (CurBufferPtr == BufferEnd) {
804 // FIXME: Allocate more space, then try again.
Bill Wendling832171c2006-12-07 20:04:42 +0000805 cerr << "JIT: Ran out of space for generated machine code!\n";
Chris Lattnere993cc22006-05-11 23:08:08 +0000806 abort();
807 }
808
Jim Laskeyb92767a2006-12-14 22:53:42 +0000809 emitJumpTableInfo(F.getJumpTableInfo());
Chris Lattnerb4432f32006-05-03 17:10:41 +0000810
Chris Lattnera8279532006-06-16 18:09:26 +0000811 // FnStart is the start of the text, not the start of the constant pool and
812 // other per-function data.
813 unsigned char *FnStart =
814 (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
815 unsigned char *FnEnd = CurBufferPtr;
816
817 MemMgr.endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
818 NumBytes += FnEnd-FnStart;
Chris Lattnerbd199fb2002-12-24 00:01:05 +0000819
Chris Lattner5be478f2004-11-20 03:46:14 +0000820 if (!Relocations.empty()) {
Chris Lattnere884dc22005-07-20 16:29:20 +0000821 NumRelos += Relocations.size();
822
Chris Lattner5be478f2004-11-20 03:46:14 +0000823 // Resolve the relocations to concrete pointers.
824 for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
825 MachineRelocation &MR = Relocations[i];
826 void *ResultPtr;
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000827 if (MR.isString()) {
Chris Lattner5be478f2004-11-20 03:46:14 +0000828 ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());
Misha Brukmanf976c852005-04-21 22:55:34 +0000829
Chris Lattnerd91ff7c2005-04-18 01:44:27 +0000830 // If the target REALLY wants a stub for this function, emit it now.
831 if (!MR.doesntNeedFunctionStub())
Chris Lattnere7484012007-02-24 02:57:03 +0000832 ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
Chris Lattnerd2d5c762006-05-03 18:55:56 +0000833 } else if (MR.isGlobalValue()) {
Chris Lattner5e225582004-11-21 03:37:42 +0000834 ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
Chris Lattner43b429b2006-05-02 18:27:26 +0000835 BufferBegin+MR.getMachineCodeOffset(),
Chris Lattner5e225582004-11-21 03:37:42 +0000836 MR.doesntNeedFunctionStub());
Evan Chengf141cc42006-07-27 18:21:10 +0000837 } else if (MR.isBasicBlock()) {
838 ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
Jim Laskeyacd80ac2006-12-14 19:17:33 +0000839 } else if (MR.isConstantPoolIndex()) {
Chris Lattnerd2d5c762006-05-03 18:55:56 +0000840 ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
Evan Cheng52b510b2006-06-23 01:02:37 +0000841 } else {
842 assert(MR.isJumpTableIndex());
843 ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
Chris Lattnerd2d5c762006-05-03 18:55:56 +0000844 }
Jeff Cohen00b168892005-07-27 06:12:32 +0000845
Chris Lattner5be478f2004-11-20 03:46:14 +0000846 MR.setResultPointer(ResultPtr);
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000847
Andrew Lenharth6a974612005-07-28 12:44:13 +0000848 // If we are managing the GOT and the relocation wants an index,
849 // give it one.
Chris Lattnerd2d5c762006-05-03 18:55:56 +0000850 if (MemMgr.isManagingGOT() && MR.isGOTRelative()) {
Chris Lattnere7484012007-02-24 02:57:03 +0000851 unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
Andrew Lenharth6a974612005-07-28 12:44:13 +0000852 MR.setGOTIndex(idx);
853 if (((void**)MemMgr.getGOTBase())[idx] != ResultPtr) {
Bill Wendling832171c2006-12-07 20:04:42 +0000854 DOUT << "GOT was out of date for " << ResultPtr
855 << " pointing at " << ((void**)MemMgr.getGOTBase())[idx]
856 << "\n";
Andrew Lenharth6a974612005-07-28 12:44:13 +0000857 ((void**)MemMgr.getGOTBase())[idx] = ResultPtr;
858 }
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000859 }
Chris Lattner5be478f2004-11-20 03:46:14 +0000860 }
861
Chris Lattner43b429b2006-05-02 18:27:26 +0000862 TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
Andrew Lenharth16ec33c2005-07-22 20:48:12 +0000863 Relocations.size(), MemMgr.getGOTBase());
Chris Lattner5be478f2004-11-20 03:46:14 +0000864 }
865
Chris Lattnerd2d5c762006-05-03 18:55:56 +0000866 // Update the GOT entry for F to point to the new code.
Anton Korobeynikov8cd4c3e2007-01-19 17:25:17 +0000867 if (MemMgr.isManagingGOT()) {
Chris Lattnere7484012007-02-24 02:57:03 +0000868 unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
Chris Lattner43b429b2006-05-02 18:27:26 +0000869 if (((void**)MemMgr.getGOTBase())[idx] != (void*)BufferBegin) {
Bill Wendling832171c2006-12-07 20:04:42 +0000870 DOUT << "GOT was out of date for " << (void*)BufferBegin
871 << " pointing at " << ((void**)MemMgr.getGOTBase())[idx] << "\n";
Chris Lattner43b429b2006-05-02 18:27:26 +0000872 ((void**)MemMgr.getGOTBase())[idx] = (void*)BufferBegin;
Andrew Lenharth6a974612005-07-28 12:44:13 +0000873 }
874 }
875
Evan Cheng55fc2802006-07-25 20:40:54 +0000876 // Invalidate the icache if necessary.
Evan Cheng55b50532006-07-27 06:33:55 +0000877 synchronizeICache(FnStart, FnEnd-FnStart);
Evan Cheng55fc2802006-07-25 20:40:54 +0000878
Bill Wendling832171c2006-12-07 20:04:42 +0000879 DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart
880 << "] Function: " << F.getFunction()->getName()
881 << ": " << (FnEnd-FnStart) << " bytes of text, "
882 << Relocations.size() << " relocations\n";
Chris Lattner5be478f2004-11-20 03:46:14 +0000883 Relocations.clear();
Anton Korobeynikov8cd4c3e2007-01-19 17:25:17 +0000884
Chris Lattnerc5633c22007-01-20 20:51:43 +0000885#ifndef NDEBUG
Anton Korobeynikovc6551ff2007-03-06 05:32:48 +0000886 if (sys::hasDisassembler())
887 DOUT << "Disassembled code:\n"
888 << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart);
Chris Lattnerc5633c22007-01-20 20:51:43 +0000889#endif
Anton Korobeynikov8cd4c3e2007-01-19 17:25:17 +0000890
Chris Lattner43b429b2006-05-02 18:27:26 +0000891 return false;
Chris Lattnerbd199fb2002-12-24 00:01:05 +0000892}
893
Chris Lattner166f2262004-11-22 22:00:25 +0000894void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
Chris Lattnerfa77d432006-02-09 04:22:52 +0000895 const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
Chris Lattner2c0a6a12003-11-30 04:23:21 +0000896 if (Constants.empty()) return;
897
Evan Chengcd5731d2006-09-12 20:59:59 +0000898 MachineConstantPoolEntry CPE = Constants.back();
899 unsigned Size = CPE.Offset;
900 const Type *Ty = CPE.isMachineConstantPoolEntry()
Chris Lattner8a650092006-09-13 16:21:10 +0000901 ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
Evan Chengcd5731d2006-09-12 20:59:59 +0000902 Size += TheJIT->getTargetData()->getTypeSize(Ty);
Chris Lattner2c0a6a12003-11-30 04:23:21 +0000903
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000904 ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
Chris Lattner239862c2006-02-09 04:49:59 +0000905 ConstantPool = MCP;
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000906
907 if (ConstantPoolBase == 0) return; // Buffer overflow.
908
Chris Lattner239862c2006-02-09 04:49:59 +0000909 // Initialize the memory for all of the constant pool entries.
Chris Lattner3029f922006-02-09 04:46:04 +0000910 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
Chris Lattner239862c2006-02-09 04:49:59 +0000911 void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
Evan Chengcd5731d2006-09-12 20:59:59 +0000912 if (Constants[i].isMachineConstantPoolEntry()) {
913 // FIXME: add support to lower machine constant pool values into bytes!
Bill Wendling832171c2006-12-07 20:04:42 +0000914 cerr << "Initializing memory with a machine-specific constant pool entry"
915 << " has not been implemented!\n";
Evan Chengcd5731d2006-09-12 20:59:59 +0000916 abort();
917 }
918 TheJIT->InitializeMemory(Constants[i].Val.ConstVal, CAddr);
Chris Lattner1cc08382003-01-13 01:00:12 +0000919 }
920}
921
Nate Begeman37efe672006-04-22 18:53:45 +0000922void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
923 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
924 if (JT.empty()) return;
925
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000926 unsigned NumEntries = 0;
Nate Begeman37efe672006-04-22 18:53:45 +0000927 for (unsigned i = 0, e = JT.size(); i != e; ++i)
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000928 NumEntries += JT[i].MBBs.size();
929
930 unsigned EntrySize = MJTI->getEntrySize();
931
Nate Begeman37efe672006-04-22 18:53:45 +0000932 // Just allocate space for all the jump tables now. We will fix up the actual
933 // MBB entries in the tables after we emit the code for each block, since then
934 // we will know the final locations of the MBBs in memory.
935 JumpTable = MJTI;
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000936 JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment());
Nate Begeman37efe672006-04-22 18:53:45 +0000937}
938
Jim Laskeyb92767a2006-12-14 22:53:42 +0000939void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
Nate Begeman37efe672006-04-22 18:53:45 +0000940 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
Chris Lattnerf75f9be2006-05-02 23:22:24 +0000941 if (JT.empty() || JumpTableBase == 0) return;
Nate Begeman37efe672006-04-22 18:53:45 +0000942
Jim Laskeyb92767a2006-12-14 22:53:42 +0000943 if (TargetMachine::getRelocationModel() == Reloc::PIC_) {
Jim Laskeyacd80ac2006-12-14 19:17:33 +0000944 assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?");
945 // For each jump table, place the offset from the beginning of the table
946 // to the target address.
947 int *SlotPtr = (int*)JumpTableBase;
Chris Lattner32ca55f2006-05-03 00:13:06 +0000948
Jim Laskeyacd80ac2006-12-14 19:17:33 +0000949 for (unsigned i = 0, e = JT.size(); i != e; ++i) {
950 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
951 // Store the offset of the basic block for this jump table slot in the
952 // memory we allocated for the jump table in 'initJumpTableInfo'
953 intptr_t Base = (intptr_t)SlotPtr;
954 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
955 *SlotPtr++ = (intptr_t)getMachineBasicBlockAddress(MBBs[mi]) - Base;
956 }
957 } else {
958 assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");
959
960 // For each jump table, map each target in the jump table to the address of
961 // an emitted MachineBasicBlock.
962 intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
963
964 for (unsigned i = 0, e = JT.size(); i != e; ++i) {
965 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
966 // Store the address of the basic block for this jump table slot in the
967 // memory we allocated for the jump table in 'initJumpTableInfo'
968 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
969 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
970 }
Nate Begeman37efe672006-04-22 18:53:45 +0000971 }
972}
973
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000974void JITEmitter::startFunctionStub(unsigned StubSize, unsigned Alignment) {
Chris Lattner43b429b2006-05-02 18:27:26 +0000975 SavedBufferBegin = BufferBegin;
976 SavedBufferEnd = BufferEnd;
977 SavedCurBufferPtr = CurBufferPtr;
978
Evan Cheng9a1e9b92006-11-16 20:04:54 +0000979 BufferBegin = CurBufferPtr = MemMgr.allocateStub(StubSize, Alignment);
Chris Lattner43b429b2006-05-02 18:27:26 +0000980 BufferEnd = BufferBegin+StubSize+1;
Chris Lattner6125fdd2003-05-09 03:30:07 +0000981}
982
Chris Lattner166f2262004-11-22 22:00:25 +0000983void *JITEmitter::finishFunctionStub(const Function *F) {
Chris Lattner43b429b2006-05-02 18:27:26 +0000984 NumBytes += getCurrentPCOffset();
985 std::swap(SavedBufferBegin, BufferBegin);
986 BufferEnd = SavedBufferEnd;
987 CurBufferPtr = SavedCurBufferPtr;
988 return SavedBufferBegin;
Chris Lattnerbba1b6d2003-06-01 23:24:36 +0000989}
990
Chris Lattnerbba1b6d2003-06-01 23:24:36 +0000991// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
992// in the constant pool that was last emitted with the 'emitConstantPool'
993// method.
994//
Chris Lattnerb4432f32006-05-03 17:10:41 +0000995intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
Chris Lattner239862c2006-02-09 04:49:59 +0000996 assert(ConstantNum < ConstantPool->getConstants().size() &&
Misha Brukman3c944972005-04-22 04:08:30 +0000997 "Invalid ConstantPoolIndex!");
Chris Lattner239862c2006-02-09 04:49:59 +0000998 return (intptr_t)ConstantPoolBase +
999 ConstantPool->getConstants()[ConstantNum].Offset;
Chris Lattnerbba1b6d2003-06-01 23:24:36 +00001000}
1001
Nate Begeman37efe672006-04-22 18:53:45 +00001002// getJumpTableEntryAddress - Return the address of the JumpTable with index
1003// 'Index' in the jump table that was last initialized with 'initJumpTableInfo'.
1004//
Chris Lattnerb4432f32006-05-03 17:10:41 +00001005intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
Nate Begeman37efe672006-04-22 18:53:45 +00001006 const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
1007 assert(Index < JT.size() && "Invalid jump table index!");
1008
1009 unsigned Offset = 0;
1010 unsigned EntrySize = JumpTable->getEntrySize();
1011
1012 for (unsigned i = 0; i < Index; ++i)
Jim Laskeyacd80ac2006-12-14 19:17:33 +00001013 Offset += JT[i].MBBs.size();
1014
1015 Offset *= EntrySize;
Nate Begeman37efe672006-04-22 18:53:45 +00001016
Nate Begemanc34b2272006-04-25 17:46:32 +00001017 return (intptr_t)((char *)JumpTableBase + Offset);
Nate Begeman37efe672006-04-22 18:53:45 +00001018}
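// Worked sketch of the offset computation above (illustrative values only):
// the byte offset of table 'Index' is the total entry count of all earlier
// tables times the per-entry size. With tables of 3 and 5 four-byte entries,
// index 1 resolves to JumpTableBase + 3*4 = JumpTableBase + 12.
static uintptr_t JumpTableByteOffsetSketch(const std::vector<unsigned> &Counts,
                                           unsigned Index, unsigned EntrySize) {
  uintptr_t Entries = 0;
  for (unsigned i = 0; i < Index; ++i)
    Entries += Counts[i];             // entries in every preceding table
  return Entries * EntrySize;         // byte offset from JumpTableBase
}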
1019
Chris Lattnere993cc22006-05-11 23:08:08 +00001020//===----------------------------------------------------------------------===//
1021// Public interface to this file
1022//===----------------------------------------------------------------------===//
1023
1024MachineCodeEmitter *JIT::createEmitter(JIT &jit) {
1025 return new JITEmitter(jit);
1026}
1027
Misha Brukmand69c1e62003-07-28 19:09:06 +00001028// getPointerToNamedFunction - This function is used as a global wrapper to
Chris Lattner4d326fa2003-12-20 01:46:27 +00001029// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
Misha Brukmand69c1e62003-07-28 19:09:06 +00001030// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
1031// need to resolve function(s) that are being mis-codegenerated, so we need to
1032// resolve their addresses at runtime, and this is the way to do it.
1033extern "C" {
1034 void *getPointerToNamedFunction(const char *Name) {
Chris Lattnerfe854032006-08-16 01:24:12 +00001035 if (Function *F = TheJIT->FindFunctionNamed(Name))
Chris Lattner4d326fa2003-12-20 01:46:27 +00001036 return TheJIT->getPointerToFunction(F);
1037 return TheJIT->getPointerToNamedFunction(Name);
Misha Brukmand69c1e62003-07-28 19:09:06 +00001038 }
1039}
Chris Lattnere993cc22006-05-11 23:08:08 +00001040
1041// getPointerToFunctionOrStub - If the specified function has been
1042// code-gen'd, return a pointer to the function. If not, compile it, or use
1043// a stub to implement lazy compilation if available.
1044//
1045void *JIT::getPointerToFunctionOrStub(Function *F) {
1046 // If we have already code-generated the function, just return the address.
1047 if (void *Addr = getPointerToGlobalIfAvailable(F))
1048 return Addr;
1049
Chris Lattnere7484012007-02-24 02:57:03 +00001050 // Get a stub if the target supports it.
1051 assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
1052 JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
1053 return JE->getJITResolver().getFunctionStub(F);
Chris Lattnere993cc22006-05-11 23:08:08 +00001054}
1055
1056/// freeMachineCodeForFunction - Release machine code memory for the given Function.
1057///
1058void JIT::freeMachineCodeForFunction(Function *F) {
1059 // Delete translation for this from the ExecutionEngine, so it will get
1060 // retranslated next time it is used.
1061 updateGlobalMapping(F, 0);
1062
1063 // Free the actual memory for the function body and related stuff.
1064 assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
Chris Lattnere7484012007-02-24 02:57:03 +00001065 static_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F);
Chris Lattnere993cc22006-05-11 23:08:08 +00001066}
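// Usage sketch for the two entry points above (hypothetical caller; assumes
// an existing JIT instance and a Function* in its module that takes no
// arguments and returns int):
static void RunOnceAndDiscardSketch(JIT &EE, Function *F) {
  typedef int (*EntryFn)();
  // May hand back a lazy-compilation stub rather than finished machine code.
  EntryFn Entry = (EntryFn)(intptr_t)EE.getPointerToFunctionOrStub(F);
  Entry();                              // first call triggers compilation
  EE.freeMachineCodeForFunction(F);     // reclaim the emitted body
}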
1067