//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a MachineCodeEmitter object that is used by the JIT to
// write machine code to memory and remember where relocatable values are.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "llvm/Constant.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/System/Memory.h"
#include <algorithm>
#include <iostream>
using namespace llvm;

namespace {
  Statistic<> NumBytes("jit", "Number of bytes of machine code compiled");
  Statistic<> NumRelos("jit", "Number of relocations applied");
  JIT *TheJIT = 0;
}


//===----------------------------------------------------------------------===//
// JITMemoryManager code.
//
namespace {
  /// MemoryRangeHeader - For a range of memory, this is the header that we put
  /// on the block of memory.  It is carefully crafted to be one word of memory.
  /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
  /// which starts with this.
  struct FreeRangeHeader;
  struct MemoryRangeHeader {
    /// ThisAllocated - This is true if this block is currently allocated.  If
    /// not, this can be converted to a FreeRangeHeader.
    intptr_t ThisAllocated : 1;

    /// PrevAllocated - Keep track of whether the block immediately before us is
    /// allocated.  If not, the word immediately before this header is the size
    /// of the previous block.
    intptr_t PrevAllocated : 1;

    /// BlockSize - This is the size in bytes of this memory block,
    /// including this header.
    uintptr_t BlockSize : (sizeof(intptr_t)*8 - 2);


    /// getBlockAfter - Return the memory block immediately after this one.
    ///
    MemoryRangeHeader &getBlockAfter() const {
      return *(MemoryRangeHeader*)((char*)this+BlockSize);
    }

    /// getFreeBlockBefore - If the block before this one is free, return it,
    /// otherwise return null.
    FreeRangeHeader *getFreeBlockBefore() const {
      if (PrevAllocated) return 0;
      intptr_t PrevSize = ((intptr_t *)this)[-1];
      return (FreeRangeHeader*)((char*)this-PrevSize);
    }

    /// FreeBlock - Turn an allocated block into a free block, adjusting
    /// bits in the object headers, and adding an end of region memory block.
    FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);

    /// TrimAllocationToSize - If this allocated block is significantly larger
    /// than NewSize, split it into two pieces (where the former is NewSize
    /// bytes, including the header), and add the new block to the free list.
    FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
                                          uint64_t NewSize);
  };

  /// FreeRangeHeader - For a memory block that isn't already allocated, this
  /// keeps track of the current block and has a pointer to the next free block.
  /// Free blocks are kept on a circularly linked list.
  struct FreeRangeHeader : public MemoryRangeHeader {
    FreeRangeHeader *Prev;
    FreeRangeHeader *Next;

    /// getMinBlockSize - Get the minimum size for a memory block.  Blocks
    /// smaller than this size cannot be created.
    static unsigned getMinBlockSize() {
      return sizeof(FreeRangeHeader)+sizeof(intptr_t);
    }

    /// SetEndOfBlockSizeMarker - The word at the end of every free block is
    /// known to be the size of the free block.  Set it for this block.
    void SetEndOfBlockSizeMarker() {
      void *EndOfBlock = (char*)this + BlockSize;
      ((intptr_t *)EndOfBlock)[-1] = BlockSize;
    }

    FreeRangeHeader *RemoveFromFreeList() {
      assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
      Next->Prev = Prev;
      return Prev->Next = Next;
    }

    void AddToFreeList(FreeRangeHeader *FreeList) {
      Next = FreeList;
      Prev = FreeList->Prev;
      Prev->Next = this;
      Next->Prev = this;
    }

    /// GrowBlock - The block after this block just got deallocated.  Merge it
    /// into the current block.
    void GrowBlock(uintptr_t NewSize);

    /// AllocateBlock - Mark this entire block allocated, updating freelists
    /// etc.  This returns a pointer to the circular free-list.
    FreeRangeHeader *AllocateBlock();
  };
}


/// AllocateBlock - Mark this entire block allocated, updating freelists
/// etc.  This returns a pointer to the circular free-list.
FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
  assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
         "Cannot allocate an allocated block!");
  // Mark this block allocated.
  ThisAllocated = 1;
  getBlockAfter().PrevAllocated = 1;

  // Remove it from the free list.
  return RemoveFromFreeList();
}

/// FreeBlock - Turn an allocated block into a free block, adjusting
/// bits in the object headers, and adding an end of region memory block.
/// If possible, coalesce this block with neighboring blocks.  Return the
/// FreeRangeHeader to allocate from.
FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
  MemoryRangeHeader *FollowingBlock = &getBlockAfter();
  assert(ThisAllocated && "This block is already free!");
  assert(FollowingBlock->PrevAllocated && "Flags out of sync!");

  FreeRangeHeader *FreeListToReturn = FreeList;

  // If the block after this one is free, merge it into this block.
  if (!FollowingBlock->ThisAllocated) {
    FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
    // "FreeList" always needs to be a valid free block.  If we're about to
    // coalesce with it, update our notion of what the free list is.
    if (&FollowingFreeBlock == FreeList) {
      FreeList = FollowingFreeBlock.Next;
      FreeListToReturn = 0;
      assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
    }
    FollowingFreeBlock.RemoveFromFreeList();

    // Include the following block into this one.
    BlockSize += FollowingFreeBlock.BlockSize;
    FollowingBlock = &FollowingFreeBlock.getBlockAfter();

    // Tell the block after the block we are coalescing that this block is
    // allocated.
    FollowingBlock->PrevAllocated = 1;
  }

  assert(FollowingBlock->ThisAllocated && "Missed coalescing?");

  if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
    PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
    return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
  }

  // Otherwise, mark this block free.
  FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
  FollowingBlock->PrevAllocated = 0;
  FreeBlock.ThisAllocated = 0;

  // Link this into the linked list of free blocks.
  FreeBlock.AddToFreeList(FreeList);

  // Add a marker at the end of the block, indicating the size of this free
  // block.
  FreeBlock.SetEndOfBlockSizeMarker();
  return FreeListToReturn ? FreeListToReturn : &FreeBlock;
}

/// GrowBlock - The block after this block just got deallocated.  Merge it
/// into the current block.
void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
  assert(NewSize > BlockSize && "Not growing block?");
  BlockSize = NewSize;
  SetEndOfBlockSizeMarker();
  getBlockAfter().PrevAllocated = 0;
}

/// TrimAllocationToSize - If this allocated block is significantly larger
/// than NewSize, split it into two pieces (where the former is NewSize
/// bytes, including the header), and add the new block to the free list.
FreeRangeHeader *MemoryRangeHeader::
TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
  assert(ThisAllocated && getBlockAfter().PrevAllocated &&
         "Cannot deallocate part of an allocated block!");

  // Round up size for alignment of header.
  unsigned HeaderAlign = __alignof(FreeRangeHeader);
  NewSize = (NewSize + (HeaderAlign-1)) & ~(HeaderAlign-1);

  // Size is now the size of the block we will remove from the start of the
  // current block.
  assert(NewSize <= BlockSize &&
         "Allocating more space from this block than exists!");

  // If splitting this block will cause the remainder to be too small, do not
  // split the block.
  if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
    return FreeList;

  // Otherwise, we splice the required number of bytes out of this block, form
  // a new block immediately after it, then mark this block allocated.
  MemoryRangeHeader &FormerNextBlock = getBlockAfter();

  // Change the size of this block.
  BlockSize = NewSize;

  // Get the new block we just sliced out and turn it into a free block.
  FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
  NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
  NewNextBlock.ThisAllocated = 0;
  NewNextBlock.PrevAllocated = 1;
  NewNextBlock.SetEndOfBlockSizeMarker();
  FormerNextBlock.PrevAllocated = 0;
  NewNextBlock.AddToFreeList(FreeList);
  return &NewNextBlock;
}


namespace {
  /// JITMemoryManager - Manage memory for the JIT code generation in a logical,
  /// sane way.  This splits a large block of MAP_NORESERVE'd memory into two
  /// sections, one for function stubs, one for the functions themselves.  We
  /// have to do this because we may need to emit a function stub while in the
  /// middle of emitting a function, and we don't know how large the function we
  /// are emitting is.  This never bothers to release the memory, because when
  /// we are ready to destroy the JIT, the program exits.
  class JITMemoryManager {
    std::vector<sys::MemoryBlock> Blocks;  // Memory blocks allocated by the JIT
    FreeRangeHeader *FreeMemoryList;       // Circular list of free blocks.

    // When emitting code into a memory block, this is the block.
    MemoryRangeHeader *CurBlock;

    unsigned char *CurStubPtr, *StubBase;
    unsigned char *GOTBase;      // Target Specific reserved memory

    // Centralize memory block allocation.
    sys::MemoryBlock getNewMemoryBlock(unsigned size);

    std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
  public:
    JITMemoryManager(bool useGOT);
    ~JITMemoryManager();

    inline unsigned char *allocateStub(unsigned StubSize);

    /// startFunctionBody - When a function starts, allocate a block of free
    /// executable memory, returning a pointer to it and its actual size.
    unsigned char *startFunctionBody(uintptr_t &ActualSize) {
      CurBlock = FreeMemoryList;

      // Allocate the entire memory block.
      FreeMemoryList = FreeMemoryList->AllocateBlock();
      ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
      return (unsigned char *)(CurBlock+1);
    }

    /// endFunctionBody - The function F is now allocated, and takes the memory
    /// in the range [FunctionStart,FunctionEnd).
    void endFunctionBody(const Function *F, unsigned char *FunctionStart,
                         unsigned char *FunctionEnd) {
      assert(FunctionEnd > FunctionStart);
      assert(FunctionStart == (unsigned char *)(CurBlock+1) &&
             "Mismatched function start/end!");

      uintptr_t BlockSize = FunctionEnd - (unsigned char *)CurBlock;
      FunctionBlocks[F] = CurBlock;

      // Release the memory at the end of this block that isn't needed.
      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
    }

    unsigned char *getGOTBase() const {
      return GOTBase;
    }
    bool isManagingGOT() const {
      return GOTBase != NULL;
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(const Function *F) {
      std::map<const Function*, MemoryRangeHeader*>::iterator
        I = FunctionBlocks.find(F);
      if (I == FunctionBlocks.end()) return;

      // Find the block that is allocated for this function.
      MemoryRangeHeader *MemRange = I->second;
      assert(MemRange->ThisAllocated && "Block isn't allocated!");

      // Free the memory.
      FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);

      // Finally, remove this entry from FunctionBlocks.
      FunctionBlocks.erase(I);
    }
  };
}

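// JITMemoryManager ctor - Reserve one large chunk of RWX memory, dedicate the
// first 512K of it to function stubs, and carve the remainder into the
// initial free list laid out in the block diagram below.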
JITMemoryManager::JITMemoryManager(bool useGOT) {
  // Allocate a 16M block of memory for functions.
  sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);

  unsigned char *MemBase = reinterpret_cast<unsigned char*>(MemBlock.base());

  // Allocate stubs backwards from the base, allocate functions forward
  // from the base.
  StubBase   = MemBase;
  CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.

  // We set up the memory chunk with 4 mem regions, like this:
  //  [ START
  //    [ Free      #0 ] -> Large space to allocate functions from.
  //    [ Allocated #1 ] -> Tiny space to separate regions.
  //    [ Free      #2 ] -> Tiny space so there is always at least 1 free block.
  //    [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
  //  END ]
  //
  // The last three blocks are never deallocated or touched.

  // Add MemoryRangeHeader to the end of the memory region, indicating that
  // the space after the block of memory is allocated.  This is block #3.
  MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
  Mem3->ThisAllocated = 1;
  Mem3->PrevAllocated = 0;
  Mem3->BlockSize     = 0;

  /// Add a tiny free region so that the free list always has one entry.
  FreeRangeHeader *Mem2 =
    (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
  Mem2->ThisAllocated = 0;
  Mem2->PrevAllocated = 1;
  Mem2->BlockSize     = FreeRangeHeader::getMinBlockSize();
  Mem2->SetEndOfBlockSizeMarker();
  Mem2->Prev = Mem2;   // Mem2 *is* the free list for now.
  Mem2->Next = Mem2;

  /// Add a tiny allocated region so that Mem2 is never coalesced away.
  MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
  Mem1->ThisAllocated = 1;
  Mem1->PrevAllocated = 0;
  Mem1->BlockSize     = (char*)Mem2 - (char*)Mem1;

  // Add a FreeRangeHeader to the start of the function body region, indicating
  // that the space is free.  Mark the previous block allocated so we never look
  // at it.
  FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr;
  Mem0->ThisAllocated = 0;
  Mem0->PrevAllocated = 1;
  Mem0->BlockSize     = (char*)Mem1-(char*)Mem0;
  Mem0->SetEndOfBlockSizeMarker();
  Mem0->AddToFreeList(Mem2);

  // Start out with the freelist pointing to Mem0.
  FreeMemoryList = Mem0;

  // Allocate the GOT.
  GOTBase = NULL;
  if (useGOT) GOTBase = (unsigned char*)malloc(sizeof(void*) * 8192);
}

JITMemoryManager::~JITMemoryManager() {
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
    sys::Memory::ReleaseRWX(Blocks[i]);
  Blocks.clear();
}

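// allocateStub - Carve StubSize bytes out of the stub region, working
// backwards from CurStubPtr toward StubBase, and abort if the region is
// exhausted.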
unsigned char *JITMemoryManager::allocateStub(unsigned StubSize) {
  CurStubPtr -= StubSize;
  if (CurStubPtr < StubBase) {
    // FIXME: allocate a new block
    std::cerr << "JIT ran out of memory for function stubs!\n";
    abort();
  }
  return CurStubPtr;
}

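// getNewMemoryBlock - Allocate a new read/write/execute block of the given
// size from the system, remembering it in Blocks so the destructor can
// release it later.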
sys::MemoryBlock JITMemoryManager::getNewMemoryBlock(unsigned size) {
  try {
    // Allocate a new block close to the last one.
    const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front();
    sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld);
    Blocks.push_back(B);
    return B;
  } catch (std::string &err) {
    std::cerr << "Allocation failed when allocating new memory in the JIT\n";
    std::cerr << err << "\n";
    abort();
  }
}

//===----------------------------------------------------------------------===//
// JIT lazy compilation code.
//
namespace {
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

  public:
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// MCE - The MachineCodeEmitter to use to emit stubs with.
    MachineCodeEmitter &MCE;

    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    std::map<void*, void*> ExternalFnToStubMap;

    // Map addresses to indexes in the GOT.
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

  public:
    JITResolver(MachineCodeEmitter &mce) : MCE(mce), nextGOTIndex(0) {
      LazyResolverFn =
        TheJIT->getJITInfo().getLazyResolverFunction(JITCompilerFn);
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      /// Get the target-specific JIT resolver function.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)LazyResolverFn;
    }

    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
    /// an address.  This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void* addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address.  If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}

/// getJITResolver - This function returns the one instance of the JIT resolver.
///
static JITResolver &getJITResolver(MachineCodeEmitter *MCE = 0) {
  static JITResolver TheJITResolver(*MCE);
  return TheJITResolver;
}

/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this function, recycle it.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function unless we already KNOW it is an external
  // function, in which case we just skip the lazy resolution step.
  void *Actual = (void*)LazyResolverFn;
  if (F->isExternal() && F->hasExternalLinkage())
    Actual = TheJIT->getPointerToFunction(F);

  // Otherwise, codegen a new stub.  For now, the stub will call the lazy
  // resolver function.
  Stub = TheJIT->getJITInfo().emitFunctionStub(Actual, MCE);

  if (Actual != (void*)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub << "] for function '"
                  << F->getName() << "'\n");

  // Finally, keep track of the stub-to-Function mapping so that the
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
  return Stub;
}

/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  Stub = TheJIT->getJITInfo().emitFunctionStub(FnAddr, MCE);
  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub
                  << "] for external function at '" << FnAddr << "'\n");
  return Stub;
}

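/// getGOTIndexForAddr - Return the GOT slot associated with the given
/// address, assigning the next unused index the first time an address is
/// seen.  Index 0 is never handed out, so a zero lookup means "no entry yet".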
unsigned JITResolver::getGOTIndexForAddr(void* addr) {
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DEBUG(std::cerr << "Adding GOT entry " << idx
                    << " for addr " << addr << "\n");
    //    ((void**)MemMgr.getGOTBase())[idx] = addr;
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = getJITResolver();

  MutexGuard locked(TheJIT->lock);

  // The address given to us for the stub may not be exactly right, it might be
  // a little bit after the stub.  As such, use upper_bound to find it.
  std::map<void*, Function*>::iterator I =
    JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
  assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
         "This is not a known stub!");
  Function *F = (--I)->second;

  // We might like to remove the stub from the StubToFunction map.
  // We can't do that!  Multiple threads could be stuck, waiting to acquire the
  // lock above.  As soon as the 1st function finishes compiling the function,
  // the next one will be released, and needs to be able to find the function
  // it needs to call.
  //JR.state.getStubToFunctionMap(locked).erase(I);

  DEBUG(std::cerr << "JIT: Lazily resolving function '" << F->getName()
                  << "' In stub ptr = " << Stub << " actual ptr = "
                  << I->first << "\n");

  void *Result = TheJIT->getPointerToFunction(F);

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory.
  // Do this without allocating map space if the target isn't using a GOT.
  if (JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}


//===----------------------------------------------------------------------===//
// JITEmitter code.
//
namespace {
  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
  /// used to output functions to memory for execution.
  class JITEmitter : public MachineCodeEmitter {
    JITMemoryManager MemMgr;

    // When outputting a function stub in the context of some other function, we
    // save BufferBegin/BufferEnd/CurBufferPtr here.
    unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;

    /// Relocations - These are the relocations that the function needs, as
    /// emitted.
    std::vector<MachineRelocation> Relocations;

    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
    /// It is filled in by the StartMachineBasicBlock callback and queried by
    /// the getMachineBasicBlockAddress callback.
    std::vector<intptr_t> MBBLocations;

    /// ConstantPool - The constant pool for the current function.
    ///
    MachineConstantPool *ConstantPool;

    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
    ///
    void *ConstantPoolBase;

    /// JumpTable - The jump table info for the current function.
    ///
    MachineJumpTableInfo *JumpTable;

    /// JumpTableBase - A pointer to the first entry in the jump table.
    ///
    void *JumpTableBase;
  public:
    JITEmitter(JIT &jit) : MemMgr(jit.getJITInfo().needsGOT()) {
      TheJIT = &jit;
      DEBUG(if (MemMgr.isManagingGOT()) std::cerr << "JIT is managing a GOT\n");
    }

    virtual void startFunction(MachineFunction &F);
    virtual bool finishFunction(MachineFunction &F);

    void emitConstantPool(MachineConstantPool *MCP);
    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);

    virtual void startFunctionStub(unsigned StubSize);
    virtual void* finishFunctionStub(const Function *F);

    virtual void addRelocation(const MachineRelocation &MR) {
      Relocations.push_back(MR);
    }

    virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
      if (MBBLocations.size() <= (unsigned)MBB->getNumber())
        MBBLocations.resize((MBB->getNumber()+1)*2);
      MBBLocations[MBB->getNumber()] = getCurrentPCValue();
    }

    virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
    virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;

    virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
      assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
             MBBLocations[MBB->getNumber()] && "MBB not emitted!");
      return MBBLocations[MBB->getNumber()];
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(Function *F) {
      MemMgr.deallocateMemForFunction(F);
    }
  private:
    void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
  };
}

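/// getPointerToGlobal - Resolve the address to use for a reference to the
/// given global.  Global variables are emitted (or queued) directly; functions
/// resolve to their compiled body if available, otherwise to a lazy
/// compilation stub or a recorded callback location, depending on what the
/// target supports.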
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool DoesntNeedStub) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    /// FIXME: If we straightened things out, this could actually emit the
    /// global immediately instead of queuing it for codegen later!
    return TheJIT->getOrEmitGlobalVariable(GV);
  }

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);
  void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
  if (ResultPtr) return ResultPtr;

  if (F->hasExternalLinkage() && F->isExternal()) {
    // If this is an external function pointer, we can force the JIT to
    // 'compile' it, which really just adds it to the map.
    if (DoesntNeedStub)
      return TheJIT->getPointerToFunction(F);

    return getJITResolver(this).getFunctionStub(F);
  }

  // Okay, the function has not been compiled yet, if the target callback
  // mechanism is capable of rewriting the instruction directly, prefer to do
  // that instead of emitting a stub.
  if (DoesntNeedStub)
    return getJITResolver(this).AddCallbackAtLocation(F, Reference);

  // Otherwise, we have to emit a lazy resolving stub.
  return getJITResolver(this).getFunctionStub(F);
}

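// startFunction - Set up the output buffer for the function about to be
// emitted: grab a free block from the memory manager, lay out the constant
// pool and jump table space, align the code, and publish its start address.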
void JITEmitter::startFunction(MachineFunction &F) {
  uintptr_t ActualSize;
  BufferBegin = CurBufferPtr = MemMgr.startFunctionBody(ActualSize);
  BufferEnd = BufferBegin+ActualSize;

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}

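// finishFunction - Called when code emission is done.  Trim the memory block
// down to the size actually used, resolve and apply any pending relocations,
// and refresh the function's GOT entry if one exists.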
bool JITEmitter::finishFunction(MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // FIXME: Allocate more space, then try again.
    std::cerr << "JIT: Ran out of space for generated machine code!\n";
    abort();
  }

  emitJumpTableInfo(F.getJumpTableInfo());

  MemMgr.endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  NumBytes += getCurrentPCOffset();

  if (!Relocations.empty()) {
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr;
      if (MR.isString()) {
        ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());

        // If the target REALLY wants a stub for this function, emit it now.
        if (!MR.doesntNeedFunctionStub())
          ResultPtr = getJITResolver(this).getExternalFunctionStub(ResultPtr);
      } else if (MR.isGlobalValue()) {
        ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                       BufferBegin+MR.getMachineCodeOffset(),
                                       MR.doesntNeedFunctionStub());
      } else {
        assert(MR.isConstantPoolIndex());
        ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
      }

      MR.setResultPointer(ResultPtr);

      // If we are managing the GOT and the relocation wants an index,
      // give it one.
      if (MemMgr.isManagingGOT() && MR.isGOTRelative()) {
        unsigned idx = getJITResolver(this).getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr.getGOTBase())[idx] != ResultPtr) {
          DEBUG(std::cerr << "GOT was out of date for " << ResultPtr
                << " pointing at " << ((void**)MemMgr.getGOTBase())[idx]
                << "\n");
          ((void**)MemMgr.getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr.getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if (MemMgr.isManagingGOT()) {
    unsigned idx = getJITResolver(this).getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr.getGOTBase())[idx] != (void*)BufferBegin) {
      DEBUG(std::cerr << "GOT was out of date for " << (void*)BufferBegin
            << " pointing at " << ((void**)MemMgr.getGOTBase())[idx] << "\n");
      ((void**)MemMgr.getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  DEBUG(std::cerr << "JIT: Finished CodeGen of [" << (void*)BufferBegin
        << "] Function: " << F.getFunction()->getName()
        << ": " << getCurrentPCOffset() << " bytes of text, "
        << Relocations.size() << " relocations\n");
  Relocations.clear();
  return false;
}

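// emitConstantPool - Reserve space for the function's constant pool at the
// start of its memory block and copy the constant values into it.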
void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return;

  unsigned Size = Constants.back().Offset;
  Size += TheJIT->getTargetData()->getTypeSize(Constants.back().Val->getType());

  ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
  ConstantPool = MCP;

  if (ConstantPoolBase == 0) return;  // Buffer overflow.

  // Initialize the memory for all of the constant pool entries.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
    TheJIT->InitializeMemory(Constants[i].Val, CAddr);
  }
}

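// initJumpTableInfo - Reserve space for every jump table entry up front.  The
// slots are filled in by emitJumpTableInfo once the final basic block
// addresses are known.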
void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  unsigned NumEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize();

  // Just allocate space for all the jump tables now.  We will fix up the actual
  // MBB entries in the tables after we emit the code for each block, since then
  // we will know the final locations of the MBBs in memory.
  JumpTable = MJTI;
  JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment());
}

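// emitJumpTableInfo - Now that all basic blocks have been emitted, write the
// address of each target block into the slots reserved by initJumpTableInfo.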
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty() || JumpTableBase == 0) return;

  unsigned Offset = 0;
  assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the address of
  // an emitted MachineBasicBlock.
  intptr_t *SlotPtr = (intptr_t*)JumpTableBase;

  for (unsigned i = 0, e = JT.size(); i != e; ++i) {
    const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
  }
}

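// startFunctionStub - Redirect emission into stub memory, saving the current
// buffer pointers so emission of the enclosing function can resume later.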
void JITEmitter::startFunctionStub(unsigned StubSize) {
  SavedBufferBegin = BufferBegin;
  SavedBufferEnd = BufferEnd;
  SavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = MemMgr.allocateStub(StubSize);
  BufferEnd = BufferBegin+StubSize+1;
}

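// finishFunctionStub - Restore the saved buffer pointers and return the
// address of the stub that was just emitted.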
void *JITEmitter::finishFunctionStub(const Function *F) {
  NumBytes += getCurrentPCOffset();
  std::swap(SavedBufferBegin, BufferBegin);
  BufferEnd = SavedBufferEnd;
  CurBufferPtr = SavedCurBufferPtr;
  return SavedBufferBegin;
}

// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
// in the constant pool that was last emitted with the 'emitConstantPool'
// method.
//
intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
  assert(ConstantNum < ConstantPool->getConstants().size() &&
         "Invalid ConstantPoolIndex!");
  return (intptr_t)ConstantPoolBase +
         ConstantPool->getConstants()[ConstantNum].Offset;
}

// getJumpTableEntryAddress - Return the address of the JumpTable with index
// 'Index' in the jump table that was last initialized with 'initJumpTableInfo'
//
intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
  const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
  assert(Index < JT.size() && "Invalid jump table index!");

  unsigned Offset = 0;
  unsigned EntrySize = JumpTable->getEntrySize();

  for (unsigned i = 0; i < Index; ++i)
    Offset += JT[i].MBBs.size() * EntrySize;

  return (intptr_t)((char *)JumpTableBase + Offset);
}

//===----------------------------------------------------------------------===//
//  Public interface to this file
//===----------------------------------------------------------------------===//

MachineCodeEmitter *JIT::createEmitter(JIT &jit) {
  return new JITEmitter(jit);
}

// getPointerToNamedFunction - This function is used as a global wrapper to
// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
// bugpoint is debugging the JIT.  In that scenario, we are loading an .so and
// need to resolve function(s) that are being mis-codegenerated, so we need to
// resolve their addresses at runtime, and this is the way to do it.
extern "C" {
  void *getPointerToNamedFunction(const char *Name) {
    Module &M = TheJIT->getModule();
    if (Function *F = M.getNamedFunction(Name))
      return TheJIT->getPointerToFunction(F);
    return TheJIT->getPointerToNamedFunction(Name);
  }
}

// getPointerToFunctionOrStub - If the specified function has been
// code-gen'd, return a pointer to the function.  If not, compile it, or use
// a stub to implement lazy compilation if available.
//
void *JIT::getPointerToFunctionOrStub(Function *F) {
  // If we have already code generated the function, just return the address.
  if (void *Addr = getPointerToGlobalIfAvailable(F))
    return Addr;

  // Get a stub if the target supports it.
  return getJITResolver(MCE).getFunctionStub(F);
}

/// freeMachineCodeForFunction - release machine code memory for given Function.
///
void JIT::freeMachineCodeForFunction(Function *F) {
  // Delete translation for this from the ExecutionEngine, so it will get
  // retranslated next time it is used.
  updateGlobalMapping(F, 0);

  // Free the actual memory for the function body and related stuff.
  assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
  dynamic_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F);
}