1/*
2 * Bitcode compiler (bcc) for Android:
3 * This is an eager-compilation JIT running on Android.
4 *
5 */
6
7#define LOG_TAG "bcc"
8#include <cutils/log.h>
9
10#include <ctype.h>
11#include <errno.h>
12#include <limits.h>
13#include <stdarg.h>
14#include <stdint.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
18#include <unistd.h>
19
20#include <cutils/hashmap.h>
21
22#if defined(__i386__)
23#include <sys/mman.h>
24#endif
25
26#if defined(__arm__)
27 #define DEFAULT_ARM_CODEGEN
28 #define PROVIDE_ARM_CODEGEN
29#elif defined(__i386__)
30 #define DEFAULT_X86_CODEGEN
31 #define PROVIDE_X86_CODEGEN
32#elif defined(__x86_64__)
33 #define DEFAULT_X64_CODEGEN
34 #define PROVIDE_X64_CODEGEN
35#endif
36
37#if defined(FORCE_ARM_CODEGEN)
38 #define DEFAULT_ARM_CODEGEN
39 #undef DEFAULT_X86_CODEGEN
40 #undef DEFAULT_X64_CODEGEN
41 #define PROVIDE_ARM_CODEGEN
42 #undef PROVIDE_X86_CODEGEN
43 #undef PROVIDE_X64_CODEGEN
44#elif defined(FORCE_X86_CODEGEN)
45 #undef DEFAULT_ARM_CODEGEN
46 #define DEFAULT_X86_CODEGEN
47 #undef DEFAULT_X64_CODEGEN
48 #undef PROVIDE_ARM_CODEGEN
49 #define PROVIDE_X86_CODEGEN
50 #undef PROVIDE_X64_CODEGEN
51#elif defined(FORCE_X64_CODEGEN)
52 #undef DEFAULT_ARM_CODEGEN
53 #undef DEFAULT_X86_CODEGEN
54 #define DEFAULT_X64_CODEGEN
55 #undef PROVIDE_ARM_CODEGEN
56 #undef PROVIDE_X86_CODEGEN
57 #define PROVIDE_X64_CODEGEN
58#endif
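/*
 * For example, a host-side build could pass -DFORCE_ARM_CODEGEN on the
 * compiler command line (illustrative usage only; the actual flags live in
 * the Android makefiles) to emit ARM code regardless of the build machine.
 */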
59
60#if defined(DEFAULT_ARM_CODEGEN)
61 #define TARGET_TRIPLE_STRING "armv7-none-linux-gnueabi"
62#elif defined(DEFAULT_X86_CODEGEN)
63 #define TARGET_TRIPLE_STRING "i686-unknown-linux"
64#elif defined(DEFAULT_X64_CODEGEN)
65 #define TARGET_TRIPLE_STRING "x86_64-unknown-linux"
66#endif
67
68#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
69#define ARM_USE_VFP
70#endif
71
72#include <bcc/bcc.h>
73#include "bcc_runtime.h"
74
75#define LOG_API(...) do {} while(0)
76// #define LOG_API(...) fprintf (stderr, __VA_ARGS__)
77
78#define LOG_STACK(...) do {} while(0)
79// #define LOG_STACK(...) fprintf (stderr, __VA_ARGS__)
80
81// #define PROVIDE_TRACE_CODEGEN
82
83#if defined(USE_DISASSEMBLER)
84# include "disassembler/dis-asm.h"
85# include <cstdio>
86#endif
87
88#include <set>
89#include <map>
90#include <list>
91#include <cmath>
92#include <string>
93#include <cstring>
94#include <algorithm> /* for std::reverse */
95
96// Basic
97#include "llvm/Use.h" /* for class llvm::Use */
98#include "llvm/User.h" /* for class llvm::User */
99#include "llvm/Module.h" /* for class llvm::Module */
100#include "llvm/Function.h" /* for class llvm::Function */
101#include "llvm/Constant.h" /* for class llvm::Constant */
102#include "llvm/Constants.h" /* for class llvm::ConstantExpr */
103#include "llvm/Instruction.h" /* for class llvm::Instruction */
104#include "llvm/PassManager.h" /* for class llvm::PassManager and
105 * llvm::FunctionPassManager
106 */
107#include "llvm/LLVMContext.h" /* for llvm::getGlobalContext() */
108#include "llvm/GlobalValue.h" /* for class llvm::GlobalValue */
109#include "llvm/Instructions.h" /* for class llvm::CallInst */
110#include "llvm/OperandTraits.h" /* for macro
111 * DECLARE_TRANSPARENT_OPERAND_ACCESSORS
112 * and macro
113 * DEFINE_TRANSPARENT_OPERAND_ACCESSORS
114 */
115#include "llvm/TypeSymbolTable.h" /* for Type Reflection */
116
117// System
118#include "llvm/System/Host.h" /* for function
119 * llvm::sys::isLittleEndianHost()
120 */
121#include "llvm/System/Memory.h" /* for class llvm::sys::MemoryBlock */
122
123// ADT
124#include "llvm/ADT/APInt.h" /* for class llvm::APInt */
125#include "llvm/ADT/APFloat.h" /* for class llvm::APFloat */
126#include "llvm/ADT/DenseMap.h" /* for class llvm::DenseMap */
127#include "llvm/ADT/ValueMap.h" /* for class llvm::ValueMap and
128 * class llvm::ValueMapConfig
129 */
130#include "llvm/ADT/StringMap.h" /* for class llvm::StringMap */
131#include "llvm/ADT/OwningPtr.h" /* for class llvm::OwningPtr */
132#include "llvm/ADT/SmallString.h" /* for class llvm::SmallString */
133
134// Target
135#include "llvm/Target/TargetData.h" /* for class llvm::TargetData */
136#include "llvm/Target/TargetSelect.h" /* for function
137 * LLVMInitialize[ARM|X86]
138 * [TargetInfo|Target]()
139 */
140#include "llvm/Target/TargetOptions.h" /* for
141 * variable bool llvm::UseSoftFloat
142 * FloatABI::ABIType llvm::FloatABIType
143 * bool llvm::NoZerosInBSS
144 */
145#include "llvm/Target/TargetMachine.h" /* for class llvm::TargetMachine and
146 * llvm::TargetMachine::AssemblyFile
147 */
148#include "llvm/Target/TargetJITInfo.h" /* for class llvm::TargetJITInfo */
149#include "llvm/Target/TargetRegistry.h" /* for class llvm::TargetRegistry */
150#include "llvm/Target/SubtargetFeature.h"
151 /* for class llvm::SubtargetFeature */
152
153// Support
154#include "llvm/Support/Casting.h" /* for llvm::cast<> */
155#include "llvm/Support/raw_ostream.h" /* for class llvm::raw_ostream and
156 * llvm::raw_string_ostream
157 */
158#include "llvm/Support/ValueHandle.h" /* for class AssertingVH<> */
159#include "llvm/Support/MemoryBuffer.h" /* for class llvm::MemoryBuffer */
160#include "llvm/Support/ManagedStatic.h" /* for llvm::llvm_shutdown() */
161#include "llvm/Support/ErrorHandling.h" /* for function
162 * llvm::llvm_install_error_handler()
163 * and macro llvm_unreachable()
164 */
165#include "llvm/Support/StandardPasses.h"/* for function
166 * llvm::createStandardFunctionPasses()
167 * and
168 * llvm::createStandardModulePasses()
169 */
170#include "llvm/Support/FormattedStream.h"
171 /* for
172 * class llvm::formatted_raw_ostream
173 * llvm::formatted_raw_ostream::
174 * PRESERVE_STREAM
175 * llvm::FileModel::Error
176 */
177
178// Bitcode
179#include "llvm/Bitcode/ReaderWriter.h" /* for function
180 * llvm::ParseBitcodeFile()
181 */
182
183// CodeGen
184#include "llvm/CodeGen/Passes.h" /* for
185 * llvm::createLocalRegisterAllocator()
186 * and
187 * llvm::
188 * createLinearScanRegisterAllocator()
189 */
190#include "llvm/CodeGen/JITCodeEmitter.h"/* for class llvm::JITCodeEmitter */
191#include "llvm/CodeGen/MachineFunction.h"
192 /* for class llvm::MachineFunction */
193#include "llvm/CodeGen/RegAllocRegistry.h"
194 /* for class llvm::RegisterRegAlloc */
195#include "llvm/CodeGen/SchedulerRegistry.h"
196 /* for class llvm::RegisterScheduler
197 * and llvm::createDefaultScheduler()
198 */
199#include "llvm/CodeGen/MachineRelocation.h"
200 /* for class llvm::MachineRelocation */
201#include "llvm/CodeGen/MachineModuleInfo.h"
202 /* for class llvm::MachineModuleInfo */
203#include "llvm/CodeGen/MachineCodeEmitter.h"
204 /* for class llvm::MachineCodeEmitter */
205#include "llvm/CodeGen/MachineConstantPool.h"
206 /* for class llvm::MachineConstantPool
207 */
208#include "llvm/CodeGen/MachineJumpTableInfo.h"
209 /* for class llvm::MachineJumpTableInfo
210 */
211
212// ExecutionEngine
213#include "llvm/ExecutionEngine/GenericValue.h"
214 /* for struct llvm::GenericValue */
215#include "llvm/ExecutionEngine/JITMemoryManager.h"
216 /* for class llvm::JITMemoryManager */
217
218
219/*
220 * Compilation class that suits Android's needs.
221 * (Support: no argument passed, ...)
222 */
223
224namespace bcc {
225
226class Compiler {
227 /*
228 * This part is designed to be orthogonal to the implementation of the
229 * exported bcc*() functions and the internal struct BCCscript.
230 */
231
232
233 /*********************************************
234 * The variable section below (e.g., Triple, CodeGenOptLevel)
235 * is initialized in GlobalInitialization()
236 */
237 static bool GlobalInitialized;
238
239 /*
240 * If given, this will be the name of the target triple to compile for.
241 * If not given, the initial values defined in this file will be used.
242 */
243 static std::string Triple;
244
245 static llvm::CodeGenOpt::Level CodeGenOptLevel;
246 /*
247 * End of section of GlobalInitializing variables
248 **********************************************/
249
250 /* If given, the name of the target CPU to generate code for. */
251 static std::string CPU;
252
253 /*
254 * The list of target specific features to enable or disable -- this should
255 * be a list of strings starting with '+' (enable) or '-' (disable).
256 */
257 static std::vector<std::string> Features;
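 /*
  * A minimal illustration of the expected format (hypothetical values; the
  * real list is filled in per device/CPU):
  *
  *   Features.push_back("+vfp3");   // enable the VFPv3 subtarget feature
  *   Features.push_back("-neon");   // disable NEON
  */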
258
259 struct Runtime {
260 const char* mName;
261 void* mPtr;
262 };
263 static struct Runtime Runtimes[];
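 /*
  * Conceptually, each entry pairs an unresolved symbol name with the address
  * of its native implementation, e.g. (hypothetical entries; the real table
  * is provided by bcc_runtime.h):
  *
  *   { "sinf", (void*) &sinf },
  *   { "cosf", (void*) &cosf },
  */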
264
265 static void GlobalInitialization() {
266 if(GlobalInitialized) return;
267
268 /* Set Triple, CPU and Features here */
269 Triple = TARGET_TRIPLE_STRING;
270
271#if defined(DEFAULT_ARM_CODEGEN) || defined(PROVIDE_ARM_CODEGEN)
272 LLVMInitializeARMTargetInfo();
273 LLVMInitializeARMTarget();
274#endif
275
276#if defined(DEFAULT_X86_CODEGEN) || defined(PROVIDE_X86_CODEGEN)
277 LLVMInitializeX86TargetInfo();
278 LLVMInitializeX86Target();
279#endif
280
281#if defined(DEFAULT_X64_CODEGEN) || defined(PROVIDE_X64_CODEGEN)
282 LLVMInitializeX86TargetInfo();
283 LLVMInitializeX86Target();
284#endif
285
286 /*
287 * -O0: llvm::CodeGenOpt::None
288 * -O1: llvm::CodeGenOpt::Less
289 * -O2: llvm::CodeGenOpt::Default
290 * -O3: llvm::CodeGenOpt::Aggressive
291 */
292 CodeGenOptLevel = llvm::CodeGenOpt::Aggressive;
293
294 /* Below are the global settings to LLVM */
295
296 /* Disable frame pointer elimination optimization */
297 llvm::NoFramePointerElim = false;
298
299 /*
300 * Use hardfloat ABI
301 *
302 * FIXME: Need to detect the CPU capability and decide whether to use
303 * softfp. To use softfp, change the following 2 lines to
304 *
305 * llvm::FloatABIType = llvm::FloatABI::Soft;
306 * llvm::UseSoftFloat = true;
307 */
308 llvm::FloatABIType = llvm::FloatABI::Hard;
309 llvm::UseSoftFloat = false;
310
311 /*
312 * BCC needs all unknown symbols resolved at JIT/compilation time.
313 * So we don't need any dynamic relocation model.
314 */
315 llvm::TargetMachine::setRelocationModel(llvm::Reloc::Static);
316
317#ifdef DEFAULT_X64_CODEGEN
318 /* Data address in X86_64 architecture may reside in a far-away place */
319 llvm::TargetMachine::setCodeModel(llvm::CodeModel::Medium);
320#else
321 /*
322 * This is set for the linker (it specifies how large a range of virtual
323 * addresses we can reach for all unknown symbols.)
324 */
325 llvm::TargetMachine::setCodeModel(llvm::CodeModel::Small);
326#endif
327
328 /* Register the scheduler */
329 llvm::RegisterScheduler::setDefault(llvm::createDefaultScheduler);
330
331 /*
332 * Register allocation policy:
333 * createLocalRegisterAllocator: fast but bad quality
334 * createLinearScanRegisterAllocator: not so fast but good quality
335 */
336 llvm::RegisterRegAlloc::setDefault
337 ((CodeGenOptLevel == llvm::CodeGenOpt::None) ?
338 llvm::createLocalRegisterAllocator :
339 llvm::createLinearScanRegisterAllocator);
340
341 GlobalInitialized = true;
342 return;
343 }
344
345 static void LLVMErrorHandler(void *UserData, const std::string &Message) {
346 // std::string* Error = static_cast<std::string*>(UserData);
347 // Error->assign(Message);
348 // return;
349 fprintf(stderr, "%s\n", Message.c_str());
350 exit(1);
351 }
352
353 static const llvm::StringRef PragmaMetadataName;
354
355 private:
356 std::string mError;
357
358 inline bool hasError() const {
359 return !mError.empty();
360 }
361 inline void setError(const char* Error) {
362 mError.assign(Error); // Copying
363 return;
364 }
365 inline void setError(const std::string& Error) {
366 mError = Error;
367 return;
368 }
369
370 typedef std::list< std::pair<std::string, std::string> > PragmaList;
371 PragmaList mPragmas;
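 /*
  * Each pragma is stored as a (name, value) pair. A hypothetical
  * "#pragma version(1)" in the script would end up here roughly as:
  *
  *   mPragmas.push_back(std::make_pair("version", "1"));
  */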
372
373 /* Memory manager for the code residing in memory */
374 /*
375 * The memory scheme for our code emitter is very simple and conforms to the
376 * design decisions of Android RenderScript's Execution Environment:
377 * the code, data, and symbol sizes are limited (currently to 100 KiB.)
378 *
379 * This is very different from a typical compiler, which has no limit
380 * on the code size. How does the code emitter know the size of the code
381 * it is about to emit? It does not know beforehand. We want to solve
382 * this without complicating the code emitter too much.
383 *
384 * We solve this by pre-allocating a certain amount of memory
385 * and then starting the code emission. Once the buffer overflows, the emitter
386 * simply discards all subsequent emission but still keeps a counter
387 * of how many bytes have been emitted.
388 *
389 * So once the whole emission is done, if there was a buffer overflow,
390 * it re-allocates the buffer with a sufficient size (based on the
391 * counter from the previous emission) and emits again.
392 */
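/*
 * A rough sketch of that strategy as driver pseudo-code (illustrative names
 * only, not the actual bcc API):
 *
 *   size_t Capacity = MaxCodeSize;
 *   for (;;) {
 *     MemMgr.reset(Capacity);                 // pre-allocate the buffer
 *     size_t Needed = emitAllFunctions();     // keeps counting on overflow
 *     if (Needed <= Capacity)
 *       break;                                // everything fit; done
 *     Capacity = Needed;                      // grow to the counted size
 *   }                                         // ... and emit again
 */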
393 class CodeMemoryManager : public llvm::JITMemoryManager {
394 /* {{{ */
395 private:
396 static const unsigned int MaxCodeSize = 100 * 1024; /* 100 KiB for code */
397 static const unsigned int MaxGOTSize = 1 * 1024; /* 1 KiB for global
398 offset table (GOT) */
399
400 /*
401 * Our memory layout is as follows:
402 *
403 * The direction of arrows (-> and <-) shows memory's growth direction
404 * when more space is needed.
405 *
406 * @mpCodeMem:
407 * +--------------------------------------------------------------+
408 * | Function Memory ... -> <- ... Global/Stub/GOT |
409 * +--------------------------------------------------------------+
410 * |<----------------- Total: @MaxCodeSize bytes ---------------->|
411 *
412 * Where the size of the GOT is @MaxGOTSize bytes.
413 *
414 * @mCurFuncMemIdx: The current index (starting from 0) of the last byte
415 * of the function code's memory usage
416 * @mCurGSGMemIdx: The current index (starting from 0) of the last byte
417 * of Global Stub/GOT's memory usage
418 *
419 */
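 /*
  * Worked example under the current constants (MaxCodeSize = 100 KiB,
  * MaxGOTSize = 1 KiB): after reset(), mCurFuncMemIdx == 0 and
  * mCurGSGMemIdx == 102399. Emitting a 4096-byte function moves
  * mCurFuncMemIdx to 4096; allocating the GOT from the top moves
  * mCurGSGMemIdx down to 101375; getFreeMemSize() is then
  * 101375 - 4096 = 97279 bytes.
  */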
420
421 intptr_t mCurFuncMemIdx;
422 intptr_t mCurGSGMemIdx;
423 llvm::sys::MemoryBlock* mpCodeMem;
424
425 /* GOT Base */
426 uint8_t* mpGOTBase;
427
428 typedef std::map<const llvm::Function*, std::pair<void* /* start address */,
429 void* /* end address */>
430 > FunctionMapTy;
431 FunctionMapTy mFunctionMap;
432
433 inline intptr_t getFreeMemSize() const {
434 return mCurGSGMemIdx - mCurFuncMemIdx;
435 }
436 inline uint8_t* getCodeMemBase() const {
437 return static_cast<uint8_t*>(mpCodeMem->base());
438 }
439
440 uint8_t* allocateGSGMemory(uintptr_t Size,
441 unsigned Alignment = 1 /* no alignment */)
442 {
443 if(getFreeMemSize() < Size)
444 /* The requested size exceeds our limit */
445 return NULL;
446
447 if(Alignment == 0)
448 Alignment = 1;
449
450 uint8_t* result = getCodeMemBase() + mCurGSGMemIdx - Size;
451 result = (uint8_t*) (((intptr_t) result) & ~(intptr_t) (Alignment - 1));
452
453 mCurGSGMemIdx = result - getCodeMemBase();
454
455 return result;
456 }
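 /*
  * Note that GSG allocations grow downward and are aligned by rounding the
  * address down. Hypothetical example: with the code base at 0x40000000 and
  * mCurGSGMemIdx == 102399, allocateGSGMemory(16, 8) first computes
  * 0x40000000 + 102399 - 16 = 0x40018FEF, rounds it down to the 8-byte
  * boundary 0x40018FE8, and leaves mCurGSGMemIdx == 102376.
  */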
457
458 public:
459 CodeMemoryManager() : mpCodeMem(NULL), mpGOTBase(NULL) {
460 reset();
461 std::string ErrMsg;
462 llvm::sys::MemoryBlock B = llvm::sys::Memory::
463 AllocateRWX(MaxCodeSize, NULL, &ErrMsg);
464 if(B.base() == 0)
465 llvm::llvm_report_error(
466 "Failed to allocate Memory for code emitter\n" + ErrMsg
467 );
468 mpCodeMem = new llvm::sys::MemoryBlock(B.base(), B.size());
469
470 return;
471 }
472
473 /*
474 * setMemoryWritable - When code generation is in progress,
475 * the code pages may need permissions changed.
476 */
477 void setMemoryWritable() {
478 llvm::sys::Memory::setWritable(*mpCodeMem);
479 return;
480 }
481
482 /*
483 * setMemoryExecutable - When code generation is done and we're ready to
484 * start execution, the code pages may need permissions changed.
485 */
486 void setMemoryExecutable() {
487 llvm::sys::Memory::setExecutable(*mpCodeMem);
488 return;
489 }
490
491 /*
492 * setPoisonMemory - Setting this flag to true makes the memory manager
493 * write garbage values over freed memory. This is useful for testing and
494 * debugging, and is to be turned on by default in debug mode.
495 */
496 void setPoisonMemory(bool poison) {
497 /* no effect */
498 return;
499 }
500
501 /* Global Offset Table Management */
502
503 /*
504 * AllocateGOT - If the current table requires a Global Offset Table, this
505 * method is invoked to allocate it. This method is required to set HasGOT
506 * to true.
507 */
508 void AllocateGOT() {
509 assert(mpGOTBase == NULL && "Cannot allocate the GOT multiple times");
510 mpGOTBase = allocateGSGMemory(MaxGOTSize);
511 HasGOT = true;
512 return;
513 }
514
515 /*
516 * getGOTBase - If this is managing a Global Offset Table, this method
517 * should return a pointer to its base.
518 */
519 uint8_t* getGOTBase() const {
520 return mpGOTBase;
521 }
522
523 /* Main Allocation Functions */
524
525 /*
526 * startFunctionBody - When we start JITing a function, the JIT calls this
527 * method to allocate a block of free RWX memory; it returns a pointer to
528 * it. If the JIT wants to request a block of memory of at least a certain
529 * size, it passes that value as ActualSize, and this method returns a block
530 * with at least that much space. If the JIT doesn't know ahead of time how
531 * much space it will need to emit the function, it passes 0 for the
532 * ActualSize. In either case, this method is required to pass back the size
533 * of the allocated block through ActualSize. The JIT will be careful to
534 * not write more than the returned ActualSize bytes of memory.
535 */
536 uint8_t* startFunctionBody(const llvm::Function *F, uintptr_t &ActualSize) {
537 if(getFreeMemSize() < ActualSize)
538 /* The requested size exceeds our limit */
539 return NULL;
540
541 ActualSize = getFreeMemSize();
542 return (getCodeMemBase() + mCurFuncMemIdx);
543 }
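 /*
  * Sketch of how the LLVM JIT drives this pair of callbacks (conceptual
  * only; the real caller is LLVM's JIT emitter, not code in this file):
  *
  *   uintptr_t ActualSize = 0;                      // 0 == "size unknown"
  *   uint8_t* Start = MM->startFunctionBody(F, ActualSize);
  *   // ... emit no more than ActualSize bytes starting at Start ...
  *   MM->endFunctionBody(F, Start, Start + EmittedBytes);
  */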
544
545 /*
546 * allocateStub - This method is called by the JIT to allocate space for a
547 * function stub (used to handle limited branch displacements) while it is
548 * JIT compiling a function. For example, if foo calls bar, and if bar
549 * either needs to be lazily compiled or is a native function that exists
550 * too
551 * far away from the call site to work, this method will be used to make a
552 * thunk for it. The stub should be "close" to the current function body,
553 * but should not be included in the 'actualsize' returned by
554 * startFunctionBody.
555 */
556 uint8_t* allocateStub(const llvm::GlobalValue* F, unsigned StubSize,
557 unsigned Alignment) {
558 return allocateGSGMemory(StubSize, Alignment);
559 }
560
561 /*
562 * endFunctionBody - This method is called when the JIT is done codegen'ing
563 * the specified function. At this point we know the size of the JIT
564 * compiled function. This passes in FunctionStart (which was returned by
565 * the startFunctionBody method) and FunctionEnd which is a pointer to the
566 * actual end of the function. This method should mark the space allocated
567 * and remember where it is in case the client wants to deallocate it.
568 */
569 void endFunctionBody(const llvm::Function* F, uint8_t* FunctionStart,
570 uint8_t* FunctionEnd) {
571 assert(FunctionEnd > FunctionStart);
572 assert(FunctionStart == (getCodeMemBase() + mCurFuncMemIdx) &&
573 "Mismatched function start/end!");
574
575 /* Advance the pointer */
576 intptr_t FunctionCodeSize = FunctionEnd - FunctionStart;
577 assert(FunctionCodeSize <= getFreeMemSize() &&
578 "Code size excess the limitation!");
579 mCurFuncMemIdx += FunctionCodeSize;
580
581 /* Record that a function occupies our memory starting at @FunctionStart */
582 assert(mFunctionMap.find(F) == mFunctionMap.end() &&
583 "Function already emitted!");
584 mFunctionMap.insert(std::make_pair(F,
585 std::make_pair((void*) FunctionStart,
586 (void*) FunctionEnd)));
587
588 return;
589 }
590
591 /*
592 * allocateSpace - Allocate a (function code) memory block of the
593 * given size. This method cannot be called between
594 * calls to startFunctionBody and endFunctionBody.
595 */
596 uint8_t* allocateSpace(intptr_t Size, unsigned Alignment) {
597 if(getFreeMemSize() < Size)
598 /* The requested size exceeds our limit */
599 return NULL;
600
601 if(Alignment == 0)
602 Alignment = 1;
603
604 uint8_t* result = getCodeMemBase() + mCurFuncMemIdx;
605 result = (uint8_t*) (((intptr_t) result + Alignment - 1) &
606 ~(intptr_t) (Alignment - 1)
607 );
608
609 mCurFuncMemIdx = (result + Size) - getCodeMemBase();
610
611 return result;
612 }
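 /*
  * Unlike allocateGSGMemory(), this rounds the candidate address up to the
  * requested alignment. Hypothetical example: with the code base at
  * 0x40000000 and mCurFuncMemIdx == 100, allocateSpace(32, 16) starts from
  * 0x40000064, rounds it up to 0x40000070, and sets mCurFuncMemIdx to 144.
  */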
613
614 /* allocateGlobal - Allocate memory for a global. */
615 uint8_t* allocateGlobal(uintptr_t Size, unsigned Alignment) {
616 return allocateGSGMemory(Size, Alignment);
617 }
618
619 /*
620 * deallocateFunctionBody - Free the specified function body. The argument
621 * must be the return value from a call to startFunctionBody() that hasn't
622 * been deallocated yet. This is never called when the JIT is currently
623 * emitting a function.
624 */
625 void deallocateFunctionBody(void *Body) {
626 /* linear search */
627 FunctionMapTy::iterator I;
628 for(I = mFunctionMap.begin();
629 I != mFunctionMap.end();
630 I++)
631 if(I->second.first == Body)
632 break;
633
634 assert(I != mFunctionMap.end() && "Memory is never allocated!");
635
636 /* free the memory */
637 uint8_t* FunctionStart = (uint8_t*) I->second.first;
638 uint8_t* FunctionEnd = (uint8_t*) I->second.second;
639 intptr_t SizeNeedMove = (getCodeMemBase() + mCurFuncMemIdx) - FunctionEnd;
640
641 assert(SizeNeedMove >= 0 &&
642 "Internal error: CodeMemoryManager::mCurFuncMemIdx may not"
643 " be correctly calculated!");
644
645 if(SizeNeedMove > 0)
646 /* there's data after the function being deallocated */
647 ::memmove(FunctionStart, FunctionEnd, SizeNeedMove);
648 mCurFuncMemIdx -= (FunctionEnd - FunctionStart);
649 mFunctionMap.erase(I); /* drop the stale entry for the freed body */
650 return;
651 }
652
653 /*
654 * startExceptionTable - When we finished JITing the function, if exception
655 * handling is set, we emit the exception table.
656 */
657 uint8_t* startExceptionTable(const llvm::Function* F, uintptr_t &ActualSize)
658 {
659 assert(false && "Exceptions are not allowed in our language specification");
660 return NULL;
661 }
662
663 /*
664 * endExceptionTable - This method is called when the JIT is done emitting
665 * the exception table.
666 */
667 void endExceptionTable(const llvm::Function *F, uint8_t *TableStart,
668 uint8_t *TableEnd, uint8_t* FrameRegister) {
669 assert(false && "Exceptions are not allowed in our language specification");
670 return;
671 }
672
673 /*
674 * deallocateExceptionTable - Free the specified exception table's memory.
675 * The argument must be the return value from a call to
676 * startExceptionTable()
677 * that hasn't been deallocated yet. This is never called when the JIT is
678 * currently emitting an exception table.
679 */
680 void deallocateExceptionTable(void *ET) {
681 assert(false && "Exceptions are not allowed in our language specification");
682 return;
683 }
684
685 /* Below are the methods we create */
686 void reset() {
687 mpGOTBase = NULL;
688 HasGOT = false;
689
690 mCurFuncMemIdx = 0;
691 mCurGSGMemIdx = MaxCodeSize - 1;
692
693 mFunctionMap.clear();
694
695 return;
696 }
697
698 ~CodeMemoryManager() {
699 if(mpCodeMem != NULL)
700 llvm::sys::Memory::ReleaseRWX(*mpCodeMem);
701 return;
702 }
703 /* }}} */
704 }; /* End of class CodeMemoryManager */
705
706 /* The memory manager for code emitter */
707 llvm::OwningPtr<CodeMemoryManager> mCodeMemMgr;
708 CodeMemoryManager* createCodeMemoryManager() {
709 mCodeMemMgr.reset(new CodeMemoryManager());
710 return mCodeMemMgr.get();
711 }
712
713 /* Code emitter */
714 class CodeEmitter : public llvm::JITCodeEmitter {
715 /* {{{ */
716 public:
717 typedef llvm::DenseMap<const llvm::GlobalValue*, void*> GlobalAddressMapTy;
718 typedef GlobalAddressMapTy::const_iterator global_addresses_const_iterator;
719
720 private:
721 CodeMemoryManager* mpMemMgr;
722
723 /* The JITInfo for the target we are compiling to */
724 llvm::TargetJITInfo* mpTJI;
725
726 const llvm::TargetData* mpTD;
727
728 /*
729 * MBBLocations - This vector is a mapping from MBB ID's to their address.
730 * It is filled in by the StartMachineBasicBlock callback and queried by
731 * the getMachineBasicBlockAddress callback.
732 */
733 std::vector<uintptr_t> mMBBLocations;
734
735 /* ConstantPool - The constant pool for the current function. */
736 llvm::MachineConstantPool* mpConstantPool;
737
738 /* ConstantPoolBase - A pointer to the first entry in the constant pool. */
739 void *mpConstantPoolBase;
740
741 /* ConstPoolAddresses - Addresses of individual constant pool entries. */
742 llvm::SmallVector<uintptr_t, 8> mConstPoolAddresses;
743
744 /* JumpTable - The jump tables for the current function. */
745 llvm::MachineJumpTableInfo *mpJumpTable;
746
747 /* JumpTableBase - A pointer to the first entry in the jump table. */
748 void *mpJumpTableBase;
749
750 /*
751 * When outputting a function stub in the context of some other function, we
752 * save BufferBegin/BufferEnd/CurBufferPtr here.
753 */
754 uint8_t *mpSavedBufferBegin, *mpSavedBufferEnd, *mpSavedCurBufferPtr;
755
756 /* Relocations - These are the relocations that the function needs,
757 as emitted. */
758 std::vector<llvm::MachineRelocation> mRelocations;
759
760 /* LabelLocations - This vector is a mapping from Label ID's to their
761 address. */
762 std::vector<uintptr_t> mLabelLocations;
763
764 class EmittedFunctionCode {
765 public:
766 void* FunctionBody; // Beginning of the function's allocation.
767 void* Code; // The address the function's code actually starts at.
768 int Size; // The size of the function code
769
770 EmittedFunctionCode() : FunctionBody(NULL), Code(NULL), Size(0) { return; }
771 };
772 EmittedFunctionCode* mpCurEmitFunction;
773
774 typedef std::map<const std::string, EmittedFunctionCode*
775 > EmittedFunctionsMapTy;
776 EmittedFunctionsMapTy mEmittedFunctions;
777
778 /* MMI - Machine module info for exception information */
779 llvm::MachineModuleInfo* mpMMI;
780
781 GlobalAddressMapTy mGlobalAddressMap;
782
783 /*
784 * UpdateGlobalMapping - Replace an existing mapping for GV with a new
785 * address. This updates both maps as required. If "Addr" is null, the
786 * entry for the global is removed from the mappings.
787 */
788 void* UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
789 if(Addr == NULL) {
790 /* Removing mapping */
791 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
792 void *OldVal;
793
794 if(I == mGlobalAddressMap.end())
795 OldVal = NULL;
796 else {
797 OldVal = I->second;
798 mGlobalAddressMap.erase(I);
799 }
800
801 return OldVal;
802 }
803
804 void*& CurVal = mGlobalAddressMap[GV];
805 void* OldVal = CurVal;
806
807 CurVal = Addr;
808
809 return OldVal;
810 }
811
812 /*
813 * AddGlobalMapping - Tell the execution engine that the specified global is
814 * at the specified location. This is used internally as functions are
815 * JIT'd
816 * and as global variables are laid out in memory.
817 */
818 void AddGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
819 void*& CurVal = mGlobalAddressMap[GV];
820 assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
821 CurVal = Addr;
822 return;
823 }
824
825 /*
826 * GetPointerToGlobalIfAvailable - This returns the address of the specified
827 * global value if it has already been codegen'd,
828 * otherwise it returns null.
829 */
830 void* GetPointerToGlobalIfAvailable(const llvm::GlobalValue* GV) const {
831 GlobalAddressMapTy::const_iterator I = mGlobalAddressMap.find(GV);
832 return ((I != mGlobalAddressMap.end()) ? I->second : NULL);
833 }
834
835 unsigned int GetConstantPoolSizeInBytes(llvm::MachineConstantPool* MCP) {
836 const std::vector<llvm::MachineConstantPoolEntry>& Constants =
837 MCP->getConstants();
838
839 if(Constants.empty())
840 return 0;
841
842 unsigned int Size = 0;
843 for(int i=0;i<Constants.size();i++) {
844 llvm::MachineConstantPoolEntry CPE = Constants[i];
845 unsigned int AlignMask = CPE.getAlignment() - 1;
846 Size = (Size + AlignMask) & ~AlignMask;
847 const llvm::Type* Ty = CPE.getType();
848 Size += mpTD->getTypeAllocSize(Ty);
849 }
850
851 return Size;
852 }
853
854 /*
855 * This function converts a Constant* into a GenericValue. The interesting
856 * part is if C is a ConstantExpr.
857 */
858 void GetConstantValue(const llvm::Constant *C, llvm::GenericValue& Result) {
859 if(C->getValueID() == llvm::Value::UndefValueVal)
860 return;
861 else if(C->getValueID() == llvm::Value::ConstantExprVal) {
862 const llvm::ConstantExpr* CE = (llvm::ConstantExpr*) C;
863 const llvm::Constant* Op0 = CE->getOperand(0);
864
865 switch(CE->getOpcode()) {
866 case llvm::Instruction::GetElementPtr:
867 {
868 /* Compute the index */
869 llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
870 CE->op_end());
871 uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
872 &Indices[0],
873 Indices.size());
874
875 GetConstantValue(Op0, Result);
876 Result.PointerVal = (char*) Result.PointerVal + Offset;
877
878 return;
879 }
880 break;
881
882 case llvm::Instruction::Trunc:
883 {
884 uint32_t BitWidth =
885 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
886
887 GetConstantValue(Op0, Result);
888 Result.IntVal = Result.IntVal.trunc(BitWidth);
889
890 return;
891 }
892 break;
893
894 case llvm::Instruction::ZExt:
895 {
896 uint32_t BitWidth =
897 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
898
899 GetConstantValue(Op0, Result);
900 Result.IntVal = Result.IntVal.zext(BitWidth);
901
902 return;
903 }
904 break;
905
906 case llvm::Instruction::SExt:
907 {
908 uint32_t BitWidth =
909 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
910
911 GetConstantValue(Op0, Result);
912 Result.IntVal = Result.IntVal.sext(BitWidth);
913
914 return;
915 }
916 break;
917
918
919 case llvm::Instruction::FPTrunc:
920 {
921 /* FIXME long double */
922 GetConstantValue(Op0, Result);
923 Result.FloatVal = float(Result.DoubleVal);
924 return;
925 }
926 break;
927
928
929 case llvm::Instruction::FPExt:
930 {
931 /* FIXME long double */
932 GetConstantValue(Op0, Result);
933 Result.DoubleVal = double(Result.FloatVal);
934 return;
935 }
936 break;
937
938
939 case llvm::Instruction::UIToFP:
940 {
941 GetConstantValue(Op0, Result);
942 if(CE->getType()->isFloatTy())
943 Result.FloatVal = float(Result.IntVal.roundToDouble());
944 else if(CE->getType()->isDoubleTy())
945 Result.DoubleVal = Result.IntVal.roundToDouble();
946 else if(CE->getType()->isX86_FP80Ty()) {
947 const uint64_t zero[] = { 0, 0 };
948 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
949 apf.convertFromAPInt(Result.IntVal,
950 false,
951 llvm::APFloat::rmNearestTiesToEven);
952 Result.IntVal = apf.bitcastToAPInt();
953 }
954 return;
955 }
956 break;
957
958 case llvm::Instruction::SIToFP:
959 {
960 GetConstantValue(Op0, Result);
961 if(CE->getType()->isFloatTy())
962 Result.FloatVal = float(Result.IntVal.signedRoundToDouble());
963 else if(CE->getType()->isDoubleTy())
964 Result.DoubleVal = Result.IntVal.signedRoundToDouble();
965 else if(CE->getType()->isX86_FP80Ty()) {
966 const uint64_t zero[] = { 0, 0 };
967 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
968 apf.convertFromAPInt(Result.IntVal,
969 true,
970 llvm::APFloat::rmNearestTiesToEven);
971 Result.IntVal = apf.bitcastToAPInt();
972 }
973 return;
974 }
975 break;
976
977 /* double->APInt conversion handles sign */
978 case llvm::Instruction::FPToUI:
979 case llvm::Instruction::FPToSI:
980 {
981 uint32_t BitWidth =
982 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
983
984 GetConstantValue(Op0, Result);
985 if(Op0->getType()->isFloatTy())
986 Result.IntVal =
987 llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
988 else if(Op0->getType()->isDoubleTy())
989 Result.IntVal =
990 llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal, BitWidth);
991 else if(Op0->getType()->isX86_FP80Ty()) {
992 llvm::APFloat apf = llvm::APFloat(Result.IntVal);
993 uint64_t v;
994 bool ignored;
995 apf.convertToInteger(&v,
996 BitWidth,
997 CE->getOpcode()
998 == llvm::Instruction::FPToSI,
999 llvm::APFloat::rmTowardZero,
1000 &ignored);
1001 Result.IntVal = v; // endian?
1002 }
1003 return;
1004 }
1005 break;
1006
1007 case llvm::Instruction::PtrToInt:
1008 {
1009 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
1010
1011 GetConstantValue(Op0, Result);
1012 Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
1013 (Result.PointerVal));
1014
1015 return;
1016 }
1017 break;
1018
1019 case llvm::Instruction::IntToPtr:
1020 {
1021 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
1022
1023 GetConstantValue(Op0, Result);
1024 if(PtrWidth != Result.IntVal.getBitWidth())
1025 Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
1026 assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");
1027
1028 Result.PointerVal = llvm::PointerTy
1029 (uintptr_t(Result.IntVal.getZExtValue()));
1030
1031 return;
1032 }
1033 break;
1034
1035 case llvm::Instruction::BitCast:
1036 {
1037 GetConstantValue(Op0, Result);
1038 const llvm::Type* DestTy = CE->getType();
1039
1040 switch(Op0->getType()->getTypeID()) {
1041 case llvm::Type::IntegerTyID:
1042 assert(DestTy->isFloatingPointTy() && "invalid bitcast");
1043 if(DestTy->isFloatTy())
1044 Result.FloatVal = Result.IntVal.bitsToFloat();
1045 else if(DestTy->isDoubleTy())
1046 Result.DoubleVal = Result.IntVal.bitsToDouble();
1047 break;
1048
1049 case llvm::Type::FloatTyID:
1050 assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
1051 Result.IntVal.floatToBits(Result.FloatVal);
1052 break;
1053
1054 case llvm::Type::DoubleTyID:
1055 assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
1056 Result.IntVal.doubleToBits(Result.DoubleVal);
1057 break;
1058
1059 case llvm::Type::PointerTyID:
1060 assert(DestTy->isPointerTy() && "Invalid bitcast");
1061 break; // getConstantValue(Op0) above already converted it
1062
1063 default:
1064 llvm_unreachable("Invalid bitcast operand");
1065 break;
1066 }
1067
1068 return;
1069 }
1070 break;
1071
1072 case llvm::Instruction::Add:
1073 case llvm::Instruction::FAdd:
1074 case llvm::Instruction::Sub:
1075 case llvm::Instruction::FSub:
1076 case llvm::Instruction::Mul:
1077 case llvm::Instruction::FMul:
1078 case llvm::Instruction::UDiv:
1079 case llvm::Instruction::SDiv:
1080 case llvm::Instruction::URem:
1081 case llvm::Instruction::SRem:
1082 case llvm::Instruction::And:
1083 case llvm::Instruction::Or:
1084 case llvm::Instruction::Xor:
1085 {
1086 llvm::GenericValue LHS, RHS;
1087 GetConstantValue(Op0, LHS);
1088 GetConstantValue(CE->getOperand(1), RHS);
1089
1090 switch(Op0->getType()->getTypeID()) {
1091 case llvm::Type::IntegerTyID:
1092 switch (CE->getOpcode()) {
1093 case llvm::Instruction::Add:
1094 Result.IntVal = LHS.IntVal + RHS.IntVal;
1095 break;
1096 case llvm::Instruction::Sub:
1097 Result.IntVal = LHS.IntVal - RHS.IntVal;
1098 break;
1099 case llvm::Instruction::Mul:
1100 Result.IntVal = LHS.IntVal * RHS.IntVal;
1101 break;
1102 case llvm::Instruction::UDiv:
1103 Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
1104 break;
1105 case llvm::Instruction::SDiv:
1106 Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
1107 break;
1108 case llvm::Instruction::URem:
1109 Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
1110 break;
1111 case llvm::Instruction::SRem:
1112 Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
1113 break;
1114 case llvm::Instruction::And:
1115 Result.IntVal = LHS.IntVal & RHS.IntVal;
1116 break;
1117 case llvm::Instruction::Or:
1118 Result.IntVal = LHS.IntVal | RHS.IntVal;
1119 break;
1120 case llvm::Instruction::Xor:
1121 Result.IntVal = LHS.IntVal ^ RHS.IntVal;
1122 break;
1123 default:
1124 llvm_unreachable("Invalid integer opcode");
1125 break;
1126 }
1127 break;
1128
1129 case llvm::Type::FloatTyID:
1130 switch (CE->getOpcode()) {
1131 case llvm::Instruction::FAdd:
1132 Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
1133 break;
1134 case llvm::Instruction::FSub:
1135 Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
1136 break;
1137 case llvm::Instruction::FMul:
1138 Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
1139 break;
1140 case llvm::Instruction::FDiv:
1141 Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
1142 break;
1143 case llvm::Instruction::FRem:
1144 Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
1145 break;
1146 default:
1147 llvm_unreachable("Invalid float opcode");
1148 break;
1149 }
1150 break;
1151
1152 case llvm::Type::DoubleTyID:
1153 switch (CE->getOpcode()) {
1154 case llvm::Instruction::FAdd:
1155 Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
1156 break;
1157 case llvm::Instruction::FSub:
1158 Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
1159 break;
1160 case llvm::Instruction::FMul:
1161 Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
1162 break;
1163 case llvm::Instruction::FDiv:
1164 Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
1165 break;
1166 case llvm::Instruction::FRem:
1167 Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
1168 break;
1169 default:
1170 llvm_unreachable("Invalid double opcode");
1171 break;
1172 }
1173 break;
1174
1175 case llvm::Type::X86_FP80TyID:
1176 case llvm::Type::PPC_FP128TyID:
1177 case llvm::Type::FP128TyID:
1178 {
1179 llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
1180 switch (CE->getOpcode()) {
1181 case llvm::Instruction::FAdd:
1182 apfLHS.add(llvm::APFloat(RHS.IntVal),
1183 llvm::APFloat::rmNearestTiesToEven);
1184 break;
1185 case llvm::Instruction::FSub:
1186 apfLHS.subtract(llvm::APFloat(RHS.IntVal),
1187 llvm::APFloat::rmNearestTiesToEven);
1188 break;
1189 case llvm::Instruction::FMul:
1190 apfLHS.multiply(llvm::APFloat(RHS.IntVal),
1191 llvm::APFloat::rmNearestTiesToEven);
1192 break;
1193 case llvm::Instruction::FDiv:
1194 apfLHS.divide(llvm::APFloat(RHS.IntVal),
1195 llvm::APFloat::rmNearestTiesToEven);
1196 break;
1197 case llvm::Instruction::FRem:
1198 apfLHS.mod(llvm::APFloat(RHS.IntVal),
1199 llvm::APFloat::rmNearestTiesToEven);
1200 break;
1201 default:
1202 llvm_unreachable("Invalid long double opcode");
1203 llvm_unreachable(0);
1204 break;
1205 }
1206
1207 Result.IntVal = apfLHS.bitcastToAPInt();
1208 }
1209 break;
1210
1211 default:
1212 llvm_unreachable("Bad add type!");
1213 break;
1214 } /* End switch(Op0->getType()->getTypeID()) */
1215
1216 return;
1217 }
1218
1219 default:
1220 break;
1221 } /* End switch(CE->getOpcode()) */
1222
1223 std::string msg;
1224 llvm::raw_string_ostream Msg(msg);
1225 Msg << "ConstantExpr not handled: " << *CE;
1226 llvm::llvm_report_error(Msg.str());
1227 } /* C->getValueID() == llvm::Value::ConstantExprVal */
1228
1229 switch (C->getType()->getTypeID()) {
1230 case llvm::Type::FloatTyID:
1231 Result.FloatVal = llvm::cast<llvm::ConstantFP>(C)
1232 ->getValueAPF().convertToFloat();
1233 break;
1234
1235 case llvm::Type::DoubleTyID:
1236 Result.DoubleVal = llvm::cast<llvm::ConstantFP>(C)
1237 ->getValueAPF().convertToDouble();
1238 break;
1239
1240 case llvm::Type::X86_FP80TyID:
1241 case llvm::Type::FP128TyID:
1242 case llvm::Type::PPC_FP128TyID:
1243 Result.IntVal = llvm::cast <llvm::ConstantFP>(C)
1244 ->getValueAPF().bitcastToAPInt();
1245 break;
1246
1247 case llvm::Type::IntegerTyID:
1248 Result.IntVal = llvm::cast<llvm::ConstantInt>(C)
1249 ->getValue();
1250 break;
1251
1252 case llvm::Type::PointerTyID:
1253 switch(C->getValueID()) {
1254 case llvm::Value::ConstantPointerNullVal:
1255 Result.PointerVal = NULL;
1256 break;
1257
1258 case llvm::Value::FunctionVal:
1259 {
1260 const llvm::Function* F = (llvm::Function*) C;
1261 Result.PointerVal = GetPointerToFunctionOrStub
1262 (const_cast<llvm::Function*>(F)
1263 );
1264 }
1265 break;
1266
1267 case llvm::Value::GlobalVariableVal:
1268 {
1269 const llvm::GlobalVariable* GV = (llvm::GlobalVariable*) C;
1270 Result.PointerVal = GetOrEmitGlobalVariable
1271 (const_cast<llvm::GlobalVariable*>(GV)
1272 );
1273 }
1274 break;
1275
1276 case llvm::Value::BlockAddressVal:
1277 {
1278 // const llvm::BlockAddress* BA = (llvm::BlockAddress*) C;
1279 // Result.PointerVal = getPointerToBasicBlock
1280 // (const_cast<llvm::BasicBlock*>(BA->getBasicBlock()));
1281 assert(false && "JIT does not support address-of-label yet!");
1282 }
1283 break;
1284
1285 default:
1286 llvm_unreachable("Unknown constant pointer type!");
1287 break;
1288 }
1289 break;
1290
1291 default:
1292 std::string msg;
1293 llvm::raw_string_ostream Msg(msg);
1294 Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
1295 llvm::llvm_report_error(Msg.str());
1296 break;
1297 }
1298
1299 return;
1300 }
1301
1302 /*
1303 * StoreValueToMemory -
1304 * Stores the data in @Val of type @Ty at address @Addr.
1305 */
1306 void StoreValueToMemory(const llvm::GenericValue& Val, void* Addr,
1307 const llvm::Type *Ty) {
1308 const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);
1309
1310 switch(Ty->getTypeID()) {
1311 case llvm::Type::IntegerTyID:
1312 {
1313 const llvm::APInt& IntVal = Val.IntVal;
1314 assert((IntVal.getBitWidth() + 7) / 8 >= StoreBytes &&
1315 "Integer too small!");
1316
1317 uint8_t *Src = (uint8_t*) IntVal.getRawData();
1318
1319 if(llvm::sys::isLittleEndianHost()) {
1320 /*
1321 * Little-endian host - the source is ordered from LSB to MSB.
1322 * Order the destination from LSB to MSB: Do a straight copy.
1323 */
1324 memcpy(Addr, Src, StoreBytes);
1325 } else {
1326 /*
1327 * Big-endian host - the source is an array of 64 bit words
1328 * ordered from LSW to MSW.
1329 *
1330 * Each word is ordered from MSB to LSB.
1331 *
1332 * Order the destination from MSB to LSB:
1333 * Reverse the word order, but not the bytes in a word.
1334 */
1335 unsigned int i = StoreBytes;
1336 while(i > sizeof(uint64_t)) {
1337 i -= sizeof(uint64_t);
1338 memcpy((uint8_t*) Addr + i, Src, sizeof(uint64_t));
1339 Src += sizeof(uint64_t);
1340 }
1341
1342 memcpy(Addr, Src + sizeof(uint64_t) - i, i);
1343 }
1344 }
1345 break;
1346
1347 case llvm::Type::FloatTyID:
1348 {
1349 *((float*) Addr) = Val.FloatVal;
1350 }
1351 break;
1352
1353 case llvm::Type::DoubleTyID:
1354 {
1355 *((double*) Addr) = Val.DoubleVal;
1356 }
1357 break;
1358
1359 case llvm::Type::X86_FP80TyID:
1360 {
1361 memcpy(Addr, Val.IntVal.getRawData(), 10);
1362 }
1363 break;
1364
1365 case llvm::Type::PointerTyID:
1366 {
1367 /*
1368 * Ensure 64 bit target pointers are fully
1369 * initialized on 32 bit hosts.
1370 */
1371 if(StoreBytes != sizeof(llvm::PointerTy))
1372 memset(Addr, 0, StoreBytes);
1373 *((llvm::PointerTy*) Addr) = Val.PointerVal;
1374 }
1375 break;
1376
1377 default:
1378 break;
1379 }
1380
1381 if(llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
1382 std::reverse((uint8_t*) Addr, (uint8_t*) Addr + StoreBytes);
1383
1384 return;
1385 }
1386
1387 /*
1388 * InitializeConstantToMemory -
1389 * Recursive function to apply a @Constant value into the
1390 * specified memory location @Addr.
1391 */
1392 void InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
1393 switch(C->getValueID()) {
1394 case llvm::Value::UndefValueVal:
1395 // Nothing to do
1396 break;
1397
1398 case llvm::Value::ConstantVectorVal:
1399 {
1400 // dynamic cast may hurt performance
1401 const llvm::ConstantVector* CP = (llvm::ConstantVector*) C;
1402
1403 unsigned int ElementSize = mpTD->getTypeAllocSize
1404 (CP->getType()->getElementType());
1405
1406 for(int i=0;i<CP->getNumOperands();i++)
1407 InitializeConstantToMemory(CP->getOperand(i),
1408 (char*) Addr + i * ElementSize);
1409 }
1410 break;
1411
1412 case llvm::Value::ConstantAggregateZeroVal:
1413 memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
1414 break;
1415
1416 case llvm::Value::ConstantArrayVal:
1417 {
1418 const llvm::ConstantArray* CPA = (llvm::ConstantArray*) C;
1419 unsigned int ElementSize = mpTD->getTypeAllocSize
1420 (CPA->getType()->getElementType());
1421
1422 for(int i=0;i<CPA->getNumOperands();i++)
1423 InitializeConstantToMemory(CPA->getOperand(i),
1424 (char*) Addr + i * ElementSize);
1425 }
1426 break;
1427
1428 case llvm::Value::ConstantStructVal:
1429 {
1430 const llvm::ConstantStruct* CPS = (llvm::ConstantStruct*) C;
1431 const llvm::StructLayout* SL = mpTD->getStructLayout
1432 (llvm::cast<llvm::StructType>(CPS->getType()));
1433
1434 for(int i=0;i<CPS->getNumOperands();i++)
1435 InitializeConstantToMemory(CPS->getOperand(i),
1436 (char*) Addr +
1437 SL->getElementOffset(i));
1438 }
1439 break;
1440
1441 default:
1442 {
1443 if(C->getType()->isFirstClassType()) {
1444 llvm::GenericValue Val;
1445 GetConstantValue(C, Val);
1446 StoreValueToMemory(Val, Addr, C->getType());
1447 } else
1448 llvm_unreachable
1449 ("Unknown constant type to initialize memory with!");
1450 }
1451 break;
1452 }
1453
1454 return;
1455 }
1456
1457 void emitConstantPool(llvm::MachineConstantPool *MCP) {
1458 if(mpTJI->hasCustomConstantPool())
1459 return;
1460
1461 /*
1462 * Constant pool address resolution is handled by the target itself in ARM
1463 * (TargetJITInfo::hasCustomConstantPool() returns true).
1464 */
1465#if !defined(PROVIDE_ARM_CODEGEN)
1466 const std::vector<llvm::MachineConstantPoolEntry>& Constants =
1467 MCP->getConstants();
1468
1469 if(Constants.empty())
1470 return;
1471
1472 unsigned Size = GetConstantPoolSizeInBytes(MCP);
1473 unsigned Align = MCP->getConstantPoolAlignment();
1474
1475 mpConstantPoolBase = allocateSpace(Size, Align);
1476 mpConstantPool = MCP;
1477
1478 if(mpConstantPoolBase == NULL)
1479 return; /* out of memory */
1480
1481 unsigned Offset = 0;
1482 for(int i=0;i<Constants.size();i++) {
1483 llvm::MachineConstantPoolEntry CPE = Constants[i];
1484 unsigned AlignMask = CPE.getAlignment() - 1;
1485 Offset = (Offset + AlignMask) & ~AlignMask;
1486
1487 uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
1488 mConstPoolAddresses.push_back(CAddr);
1489
1490 if(CPE.isMachineConstantPoolEntry())
1491 llvm::llvm_report_error
1492 ("Initialize memory with machine specific constant pool"
1493 " entry has not been implemented!");
1494
1495 InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);
1496
1497 const llvm::Type *Ty = CPE.Val.ConstVal->getType();
1498 Offset += mpTD->getTypeAllocSize(Ty);
1499 }
1500#endif
1501 return;
1502 }
1503
1504 void initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
1505 if(mpTJI->hasCustomJumpTables())
1506 return;
1507
1508 const std::vector<llvm::MachineJumpTableEntry>& JT =
1509 MJTI->getJumpTables();
1510 if(JT.empty())
1511 return;
1512
1513 unsigned NumEntries = 0;
1514 for(int i=0;i<JT.size();i++)
1515 NumEntries += JT[i].MBBs.size();
1516
1517 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
1518
1519 mpJumpTable = MJTI;
1520 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
1521 MJTI->getEntryAlignment(*mpTD));
1522
1523 return;
1524 }
1525
1526 void emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
1527 if(mpTJI->hasCustomJumpTables())
1528 return;
1529
1530 const std::vector<llvm::MachineJumpTableEntry>& JT =
1531 MJTI->getJumpTables();
1532 if(JT.empty() || mpJumpTableBase == 0)
1533 return;
1534
1535 assert((llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static)
1536 && "Cross JIT'ing?");
1537 assert(MJTI->getEntrySize(*mpTD) == sizeof(void*) && "Cross JIT'ing?");
1538
1539 /*
1540 * For each jump table, map each target in the jump table to the
1541 * address of an emitted MachineBasicBlock.
1542 */
1543 intptr_t *SlotPtr = (intptr_t*) mpJumpTableBase;
1544 for(int i=0;i<JT.size();i++) {
1545 const std::vector<llvm::MachineBasicBlock*>& MBBs = JT[i].MBBs;
1546 /*
1547 * Store the address of the basic block for this jump table slot in the
1548 * memory we allocated for the jump table in 'initJumpTableInfo'
1549 */
1550 for(int j=0;j<MBBs.size();j++)
1551 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
1552 }
1553 }
1554
1555 void* GetPointerToGlobal(llvm::GlobalValue* V, void* Reference,
1556 bool MayNeedFarStub) {
1557 switch(V->getValueID()) {
1558 case llvm::Value::FunctionVal:
1559 {
1560 llvm::Function* F = (llvm::Function*) V;
1561 void* FnStub = GetLazyFunctionStubIfAvailable(F);
1562
1563 if(FnStub)
1564 /*
1565 * Return the function stub if it's already created.
1566 * We do this first so that:
1567 * we're returning the same address for the function
1568 * as any previous call.
1569 *
1570 * TODO: Yes, this is wrong. The lazy stub isn't guaranteed
1571 * to be close enough to call.
1572 */
1573 return FnStub;
1574
1575 /*
1576 * If we know the target can handle arbitrary-distance calls, try to
1577 * return a direct pointer.
1578 */
1579 if(!MayNeedFarStub) {
1580 /* If we have code, go ahead and return that. */
1581 if(void* ResultPtr = GetPointerToGlobalIfAvailable(F))
1582 return ResultPtr;
1583
1584 /*
1585 * x86_64 architecture may encounter the bug
1586 * http://hlvm.llvm.org/bugs/show_bug.cgi?id=5201
1587 * which generates the instruction "call" instead of "callq".
1588 *
1589 * And once the real address of the stub does not fit
1590 * in 32 bits, the replacement will truncate it to
1591 * 32 bits, resulting in a serious problem.
1592 */
1593#if !defined(__x86_64__)
1594 /*
1595 * If this is an external function pointer,
1596 * we can force the JIT to
1597 * 'compile' it, which really just adds it to the map.
1598 */
1599 if(F->isDeclaration() || F->hasAvailableExternallyLinkage())
1600 return GetPointerToFunction(F, /* AbortOnFailure */true);
1601#endif
1602 }
1603
1604 /*
1605 * Otherwise, we may need to emit a stub, and, conservatively, we
1606 * always do so.
1607 */
1608 return GetLazyFunctionStub(F);
1609 }
1610 break;
1611
1612 case llvm::Value::GlobalVariableVal:
1613 return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
1614 break;
1615
1616 case llvm::Value::GlobalAliasVal:
1617 {
1618 llvm::GlobalAlias* GA = (llvm::GlobalAlias*) V;
1619 const llvm::GlobalValue* GV = GA->resolveAliasedGlobal(false);
1620
1621 switch(GV->getValueID()) {
1622 case llvm::Value::FunctionVal:
1623 /* FIXME: is there any possibility that the function
1624 is not code-gen'd? */
1625 return GetPointerToFunction(
1626 const_cast<llvm::Function*>((const llvm::Function*) GV),
1627 /* AbortOnFailure */true
1628 );
1629 break;
1630
1631 case llvm::Value::GlobalVariableVal:
1632 {
1633 if(void* p = mGlobalAddressMap[GV])
1634 return p;
1635
1636 llvm::GlobalVariable* GVar = (llvm::GlobalVariable*) GV;
1637 EmitGlobalVariable(GVar);
1638
1639 return mGlobalAddressMap[GV];
1640 }
1641 break;
1642
1643 case llvm::Value::GlobalAliasVal:
1644 assert(false && "Alias should be resolved ultimately!");
1645 break;
1646 }
1647 }
1648 break;
1649
1650 default:
1651 break;
1652 }
1653
1654 llvm_unreachable("Unknown type of global value!");
1655
1656 }
1657
1658 /*
1659 * GetPointerToFunctionOrStub - If the specified function has been
1660 * code-gen'd, return a pointer to the function.
1661 * If not, compile it, or use
1662 * a stub to implement lazy compilation if available.
1663 */
1664 void* GetPointerToFunctionOrStub(llvm::Function* F) {
1665 /*
1666 * If we have already code generated the function,
1667 * just return the address.
1668 */
1669 if(void* Addr = GetPointerToGlobalIfAvailable(F))
1670 return Addr;
1671
1672 /* Get a stub if the target supports it. */
1673 return GetLazyFunctionStub(F);
1674 }
1675
1676 typedef llvm::DenseMap<llvm::Function*, void*> FunctionToLazyStubMapTy;
1677 FunctionToLazyStubMapTy mFunctionToLazyStubMap;
1678
1679 void* GetLazyFunctionStubIfAvailable(llvm::Function* F) {
1680 return mFunctionToLazyStubMap.lookup(F);
1681 }
1682
1683 std::set<llvm::Function*> PendingFunctions;
1684 void* GetLazyFunctionStub(llvm::Function* F) {
1685 /* If we already have a lazy stub for this function, recycle it. */
1686 void*& Stub = mFunctionToLazyStubMap[F];
1687 if(Stub)
1688 return Stub;
1689
1690 /*
1691 * In any case, we should NOT resolve functions lazily at runtime
1692 * (though we are able to).
1693 * We resolve them right now.
1694 */
1695 void* Actual = NULL;
1696 if(F->isDeclaration() || F->hasAvailableExternallyLinkage())
1697 Actual = GetPointerToFunction(F, /* AbortOnFailure */true);
1698
1699 /*
1700 * Codegen a new stub, calling the actual address of
1701 * the external function, if it was resolved.
1702 */
1703 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1704 startGVStub(F, SL.Size, SL.Alignment);
1705 Stub = mpTJI->emitFunctionStub(F, Actual, *this);
1706 finishGVStub();
1707
1708 /*
1709 * We really want the address of the stub in the GlobalAddressMap
1710 * for the JIT, not the address of the external function.
1711 */
1712 UpdateGlobalMapping(F, Stub);
1713
1714 if(!Actual)
1715 PendingFunctions.insert(F);
1716 else
1717 Disassembler(F->getNameStr() + " (stub)",
1718 (uint8_t*) Stub, SL.Size, (uintptr_t) Stub);
1719
1720 return Stub;
1721 }
1722
1723 /* Our resolver for undefined symbols */
1724 BCCSymbolLookupFn mpSymbolLookupFn;
1725 void* mpSymbolLookupContext;
1726
1727 void* GetPointerToFunction(llvm::Function* F, bool AbortOnFailure) {
1728 void* Addr = GetPointerToGlobalIfAvailable(F);
1729 if(Addr)
1730 return Addr;
1731
1732 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
1733 "Internal error: only external defined function routes here!");
1734
1735 /* Handle the failure resolution by ourselves. */
1736 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
1737 /* AbortOnFailure */ false);
1738
1739 /*
1740 * If we resolved the symbol to a null address (e.g., a weak external),
1741 * return a null pointer and let the application handle it.
1742 */
1743 if(Addr == NULL)
1744 if(AbortOnFailure)
1745 llvm::llvm_report_error
1746 ("Could not resolve external function address: " + F->getName()
1747 );
1748 else
1749 return NULL;
1750
1751 AddGlobalMapping(F, Addr);
1752
1753 return Addr;
1754 }
1755
1756 void* GetPointerToNamedSymbol(const std::string& Name,
1757 bool AbortOnFailure) {
1758 if(void* Addr = FindRuntimeFunction(Name.c_str()))
1759 return Addr;
1760
1761 if(mpSymbolLookupFn)
1762 if(void* Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1763 return Addr;
1764
1765 if(AbortOnFailure)
1766 llvm::llvm_report_error("Program used external symbol '" + Name +
1767 "' which could not be resolved!");
1768
1769 return NULL;
1770 }
1771
1772 /*
1773 * GetOrEmitGlobalVariable - Return the address of the specified global
1774 * variable, possibly emitting it to memory if needed. This is used by the
1775 * Emitter.
1776 */
1777 void* GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1778 void* Ptr = GetPointerToGlobalIfAvailable(GV);
1779 if(Ptr)
1780 return Ptr;
1781
1782 if(GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1783 /* If the global is external, just remember the address. */
1784 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1785 AddGlobalMapping(GV, Ptr);
1786 } else {
1787 /* If the global hasn't been emitted to memory yet,
1788 allocate space and emit it into memory. */
1789 Ptr = GetMemoryForGV(GV);
1790 AddGlobalMapping(GV, Ptr);
1791 EmitGlobalVariable(GV);
1792 }
1793
1794 return Ptr;
1795 }
1796
1797 /*
1798 * GetMemoryForGV - This method abstracts memory allocation of global
1799 * variable so that the JIT can allocate thread local variables depending
1800 * on the target.
1801 */
1802 void* GetMemoryForGV(const llvm::GlobalVariable* GV) {
1803 char* Ptr;
1804
1805 const llvm::Type* GlobalType = GV->getType()->getElementType();
1806 size_t S = mpTD->getTypeAllocSize(GlobalType);
1807 size_t A = mpTD->getPreferredAlignment(GV);
1808
1809 if(GV->isThreadLocal()) {
1810 /*
1811 * We can support TLS by
1812 *
1813 * Ptr = TJI.allocateThreadLocalMemory(S);
1814 *
1815 * But I tend not to.
1816 * (should we disable this in the front-end (i.e. slang)?).
1817 */
1818 llvm::llvm_report_error
1819 ("Compilation of Thread Local Storage (TLS) is disabled!");
1820
1821 } else if(mpTJI->allocateSeparateGVMemory()) {
1822 /*
1823 * On Apple's ARM targets (such as the iPhone),
1824 * the global variable should be
1825 * placed in separately allocated heap memory rather than in the same
1826 * code memory.
1827 * The question is, what about Android?
1828 */
1829 if(A <= 8) {
1830 Ptr = (char*) malloc(S);
1831 } else {
1832 /*
1833 * Allocate (S + A) bytes of memory,
1834 * then use an aligned pointer within that space.
1835 */
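 /*
  * Hypothetical example: S = 64, A = 16, and malloc() returns 0x5008.
  * MisAligned = 0x5008 & 15 = 8, so Ptr is bumped by 16 - 8 = 8 to the
  * aligned address 0x5010, which still leaves the full 64 bytes available
  * inside the 80-byte allocation.
  */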
1836 Ptr = (char*) malloc(S + A);
1837 unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
1838 Ptr = Ptr + (MisAligned ? (A - MisAligned) : 0);
1839 }
1840 } else {
1841 Ptr = (char*) allocateGlobal(S, A);
1842 }
1843
1844 return Ptr;
1845 }
1846
1847 void EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1848 void* GA = GetPointerToGlobalIfAvailable(GV);
1849
1850 if(GV->isThreadLocal())
1851 llvm::llvm_report_error("We don't support Thread Local Storage (TLS)!");
1852
1853 if(GA == NULL) {
1854 /* If it's not already specified, allocate memory for the global. */
1855 GA = GetMemoryForGV(GV);
1856 AddGlobalMapping(GV, GA);
1857 }
1858
1859 InitializeConstantToMemory(GV->getInitializer(), GA);
1860
1861 /* You can do some statistics on global variable here */
1862 return;
1863 }
1864
1865 typedef std::map<llvm::AssertingVH<llvm::GlobalValue>, void*
1866 > GlobalToIndirectSymMapTy;
1867 GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
1868
1869 void* GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1870 /*
1871 * Make sure GV is emitted first, and create a stub containing the fully
1872 * resolved address.
1873 */
1874 void* GVAddress = GetPointerToGlobal(V, Reference, false);
1875
1876 /* If we already have a stub for this global variable, recycle it. */
1877 void*& IndirectSym = GlobalToIndirectSymMap[V];
1878 /* Otherwise, codegen a new indirect symbol. */
1879 if(!IndirectSym)
1880 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1881
1882 return IndirectSym;
1883 }
1884
1885 /*
1886 * ExternalFnToStubMap - This is the equivalent of FunctionToLazyStubMap
1887 * for external functions.
1888 *
1889 * TODO: Of course, external functions don't need a lazy stub.
1890 * It's actually
1891 * here to make it more likely that far calls succeed, but no single
1892 * stub can guarantee that. I'll remove this in a subsequent checkin
1893 * when I actually fix far calls. (comment from LLVM source)
1894 */
1895 std::map<void*, void*> ExternalFnToStubMap;
1896
1897 /*
1898 * GetExternalFunctionStub - Return a stub for the function at the
1899 * specified address.
1900 */
1901 void* GetExternalFunctionStub(void* FnAddr) {
1902 void*& Stub = ExternalFnToStubMap[FnAddr];
1903 if(Stub)
1904 return Stub;
1905
1906 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1907 startGVStub(0, SL.Size, SL.Alignment);
1908 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1909 finishGVStub();
1910
1911 return Stub;
1912 }
1913
1914
1915 void Disassembler(const std::string& Name, uint8_t* Start,
1916 size_t Length, uintptr_t PC) {
1917#if defined(USE_DISASSEMBLER)
1918 FILE* out = stdout;
1919
1920 fprintf(out, "JIT: Disassembled code: %s\n", Name.c_str());
1921
1922 disassemble_info disasm_info;
1923 int (*print_insn)(bfd_vma pc, disassemble_info *info);
1924
1925 INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
1926
1927 disasm_info.buffer = Start;
1928 disasm_info.buffer_vma = (bfd_vma) (uintptr_t) Start;
1929 disasm_info.buffer_length = Length;
1930 disasm_info.endian = BFD_ENDIAN_LITTLE;
1931
1932#if defined(DEFAULT_X86_CODEGEN)
1933 disasm_info.mach = bfd_mach_i386_i386;
1934 print_insn = print_insn_i386;
1935#elif defined(DEFAULT_ARM_CODEGEN)
1936 print_insn = print_insn_arm;
1937#elif defined(DEFAULT_X64_CODEGEN)
1938 disasm_info.mach = bfd_mach_x86_64;
1939 print_insn = print_insn_i386;
1940#else
1941#error "Unknown target for disassembler"
1942#endif
1943
1944#if defined(DEFAULT_X64_CODEGEN)
1945# define TARGET_FMT_lx "%llx"
1946#else
1947# define TARGET_FMT_lx "%08x"
1948#endif
1949 int Count;
1950 for( ; Length > 0; PC += Count, Length -= Count) {
1951 fprintf(out, "\t0x" TARGET_FMT_lx ": ", (bfd_vma) PC);
1952 Count = print_insn(PC, &disasm_info);
1953 fprintf(out, "\n");
1954 }
1955
1956 fprintf(out, "\n");
1957#undef TARGET_FMT_lx
1958
1959#endif /* USE_DISASSEMBLER */
1960 return;
1961 }
1962
1963 public:
 1964  /* Takes ownership of @pMemMgr */
1965 CodeEmitter(CodeMemoryManager* pMemMgr) :
1966 mpMemMgr(pMemMgr),
1967 mpTJI(NULL),
1968 mpTD(NULL),
1969 mpCurEmitFunction(NULL),
1970 mpConstantPool(NULL),
1971 mpJumpTable(NULL),
1972 mpMMI(NULL),
1973 mpSymbolLookupFn(NULL),
1974 mpSymbolLookupContext(NULL)
1975 {
1976 return;
1977 }
1978
1979 inline global_addresses_const_iterator global_address_begin() const {
1980 return mGlobalAddressMap.begin();
1981 }
1982 inline global_addresses_const_iterator global_address_end() const {
1983 return mGlobalAddressMap.end();
1984 }
1985
1986 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
1987 mpSymbolLookupFn = pFn;
1988 mpSymbolLookupContext = pContext;
1989 return;
1990 }
1991
1992 void setTargetMachine(llvm::TargetMachine& TM) {
1993 /* set TargetJITInfo */
1994 mpTJI = TM.getJITInfo();
1995 /* set TargetData */
1996 mpTD = TM.getTargetData();
1997
1998 /*
1999 if(mpTJI->needsGOT())
 2000      mpMemMgr->AllocateGOT(); // however, neither the X86 nor the ARM
 2001                               // target needs a GOT
 2002                               // (mpTJI->needsGOT() always returns false)
2003 */
2004 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
2005
2006 return;
2007 }
2008
2009 /*
2010 * startFunction - This callback is invoked when the specified function is
2011 * about to be code generated. This initializes the BufferBegin/End/Ptr
2012 * fields.
2013 */
2014 void startFunction(llvm::MachineFunction &F) {
2015 uintptr_t ActualSize = 0;
2016
2017 mpMemMgr->setMemoryWritable();
2018
2019 /*
2020 * BufferBegin, BufferEnd and CurBufferPtr
2021 * are all inherited from class MachineCodeEmitter,
2022 * which is the super class of the class JITCodeEmitter.
2023 *
2024 * BufferBegin/BufferEnd - Pointers to the start and end of the memory
2025 * allocated for this code buffer.
2026 *
2027 * CurBufferPtr - Pointer to the next byte of memory to fill when emitting
2028 * code.
 2029     * This is guaranteed to be in the range [BufferBegin, BufferEnd].
 2030     * If this pointer is at BufferEnd, it will never move due to code
 2031     * emission, and all code emission requests will be ignored
 2032     * (this is the buffer overflow condition, which finishFunction()
 2033     * checks for).
2034 */
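    /*
     * Layout during emission (sketch):
     *
     *   BufferBegin ........ CurBufferPtr ........ BufferEnd
     *   |<---- emitted code ---->|<----- free space ----->|
     */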
2035 BufferBegin = CurBufferPtr = mpMemMgr
2036 ->startFunctionBody(F.getFunction(), ActualSize);
2037 BufferEnd = BufferBegin + ActualSize;
2038
2039 if(mpCurEmitFunction == NULL)
2040 mpCurEmitFunction = new EmittedFunctionCode();
2041 mpCurEmitFunction->FunctionBody = BufferBegin;
2042
2043 /* Ensure the constant pool/jump table info is at least 4-byte aligned. */
2044 emitAlignment(16);
2045
2046 emitConstantPool(F.getConstantPool());
2047 if(llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
2048 initJumpTableInfo(MJTI);
2049
2050 /* About to start emitting the machine code for the function. */
2051 emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
2052
2053 UpdateGlobalMapping(F.getFunction(), CurBufferPtr);
2054
2055 mpCurEmitFunction->Code = CurBufferPtr;
2056
2057 mMBBLocations.clear();
2058
2059 return;
2060 }
2061
2062 /*
 2063     * finishFunction - This callback is invoked when the specified
 2064     * function has finished code generation.
 2065     * If a buffer overflow has occurred, this method returns true
 2066     * (the caller is then required to try again with a larger buffer);
 2067     * otherwise it returns false.
2069 */
2070 bool finishFunction(llvm::MachineFunction &F) {
2071 if(CurBufferPtr == BufferEnd) {
 2072      /* Not enough memory */
2073 mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
2074 return false;
2075 }
2076
2077 if(llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
2078 emitJumpTableInfo(MJTI);
2079
2080 /*
2081 * FnStart is the start of the text,
2082 * not the start of the constant pool and other per-function data.
2083 */
2084 uint8_t* FnStart = (uint8_t*) GetPointerToGlobalIfAvailable
2085 (F.getFunction());
2086
2087 /* FnEnd is the end of the function's machine code. */
2088 uint8_t* FnEnd = CurBufferPtr;
2089
2090 if(!mRelocations.empty()) {
2091 /* Resolve the relocations to concrete pointers. */
 2092      for(unsigned int i = 0; i < mRelocations.size(); i++) {
2093 llvm::MachineRelocation& MR = mRelocations[i];
2094 void* ResultPtr = NULL;
2095
2096 if(!MR.letTargetResolve()) {
2097 if(MR.isExternalSymbol()) {
2098 ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);
2099 if(MR.mayNeedFarStub())
2100 ResultPtr = GetExternalFunctionStub(ResultPtr);
2101 } else if(MR.isGlobalValue()) {
2102 ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
2103 BufferBegin
2104 + MR.getMachineCodeOffset(),
2105 MR.mayNeedFarStub());
2106 } else if(MR.isIndirectSymbol()) {
2107 ResultPtr = GetPointerToGVIndirectSym
2108 (MR.getGlobalValue(),
2109 BufferBegin + MR.getMachineCodeOffset()
2110 );
2111 } else if(MR.isBasicBlock()) {
2112 ResultPtr =
2113 (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
2114 } else if(MR.isConstantPoolIndex()) {
2115 ResultPtr =
2116 (void*) getConstantPoolEntryAddress
2117 (MR.getConstantPoolIndex());
2118 } else {
2119 assert(MR.isJumpTableIndex() && "Unknown type of relocation");
2120 ResultPtr =
2121 (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
2122 }
2123
2124 MR.setResultPointer(ResultPtr);
2125 }
2126 }
2127
2128 mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
2129 mpMemMgr->getGOTBase());
2130 }
2131
2132 mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
2133 /*
2134 * CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
2135 * global variables that were referenced in the relocations.
2136 */
2137 if(CurBufferPtr == BufferEnd)
2138 return false;
2139
2140 /* Now that we've succeeded in emitting the function */
2141 mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
2142 BufferBegin = CurBufferPtr = 0;
2143
2144 if(F.getFunction()->hasName())
2145 mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
2146 mpCurEmitFunction = NULL;
2147
2148 mRelocations.clear();
2149 mConstPoolAddresses.clear();
2150
2151 /* Mark code region readable and executable if it's not so already. */
2152 mpMemMgr->setMemoryExecutable();
2153
2154 Disassembler(F.getFunction()->getNameStr(),
2155 FnStart, FnEnd - FnStart, (uintptr_t) FnStart);
2156
2157 if(mpMMI)
2158 mpMMI->EndFunction();
2159
2160 return false;
2161 }
2162
2163 void startGVStub(const llvm::GlobalValue* GV, unsigned StubSize,
2164 unsigned Alignment) {
2165 mpSavedBufferBegin = BufferBegin;
2166 mpSavedBufferEnd = BufferEnd;
2167 mpSavedCurBufferPtr = CurBufferPtr;
2168
2169 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
2170 Alignment);
2171 BufferEnd = BufferBegin + StubSize + 1;
2172
2173 return;
2174 }
2175
2176 void startGVStub(void* Buffer, unsigned StubSize) {
2177 mpSavedBufferBegin = BufferBegin;
2178 mpSavedBufferEnd = BufferEnd;
2179 mpSavedCurBufferPtr = CurBufferPtr;
2180
2181 BufferBegin = CurBufferPtr = (uint8_t *) Buffer;
2182 BufferEnd = BufferBegin + StubSize + 1;
2183
2184 return;
2185 }
2186
2187 void finishGVStub() {
2188 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
2189
2190 /* restore */
2191 BufferBegin = mpSavedBufferBegin;
2192 BufferEnd = mpSavedBufferEnd;
2193 CurBufferPtr = mpSavedCurBufferPtr;
2194
2195 return;
2196 }
2197
2198 /*
2199 * allocIndirectGV - Allocates and fills storage for an indirect
2200 * GlobalValue, and returns the address.
2201 */
2202 void* allocIndirectGV(const llvm::GlobalValue *GV,
2203 const uint8_t *Buffer, size_t Size,
2204 unsigned Alignment) {
2205 uint8_t* IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
2206 memcpy(IndGV, Buffer, Size);
2207 return IndGV;
2208 }
2209
2210 /* emitLabel - Emits a label */
2211 void emitLabel(uint64_t LabelID) {
2212 if(mLabelLocations.size() <= LabelID)
2213 mLabelLocations.resize((LabelID + 1) * 2);
2214 mLabelLocations[LabelID] = getCurrentPCValue();
2215 return;
2216 }
2217
2218 /*
2219 * allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
2220 * this method does not allocate memory in the current output buffer,
2221 * because a global may live longer than the current function.
2222 */
2223 void* allocateGlobal(uintptr_t Size, unsigned Alignment) {
2224 /* Delegate this call through the memory manager. */
2225 return mpMemMgr->allocateGlobal(Size, Alignment);
2226 }
2227
2228 /*
2229 * StartMachineBasicBlock - This should be called by the target when a new
2230 * basic block is about to be emitted. This way the MCE knows where the
2231 * start of the block is, and can implement getMachineBasicBlockAddress.
2232 */
2233 void StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
2234 if(mMBBLocations.size() <= (unsigned) MBB->getNumber())
2235 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
2236 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
2237 return;
2238 }
2239
2240 /*
2241 * addRelocation - Whenever a relocatable address is needed, it should be
2242 * noted with this interface.
2243 */
2244 void addRelocation(const llvm::MachineRelocation &MR) {
2245 mRelocations.push_back(MR);
2246 return;
2247 }
2248
2249 /*
2250 * getConstantPoolEntryAddress - Return the address of the 'Index' entry in
2251 * the constant pool that was last emitted with
2252 * the emitConstantPool method.
2253 */
2254 uintptr_t getConstantPoolEntryAddress(unsigned Index) const {
2255 assert(Index < mpConstantPool->getConstants().size() &&
2256 "Invalid constant pool index!");
2257 return mConstPoolAddresses[Index];
2258 }
2259
2260 /*
2261 * getJumpTableEntryAddress - Return the address of the jump table
2262 * with index
2263 * 'Index' in the function that last called initJumpTableInfo.
2264 */
2265 uintptr_t getJumpTableEntryAddress(unsigned Index) const {
2266 const std::vector<llvm::MachineJumpTableEntry>& JT =
2267 mpJumpTable->getJumpTables();
2268
2269 assert(Index < JT.size() && "Invalid jump table index!");
2270
2271 unsigned int Offset = 0;
2272 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
2273
 2274    for(unsigned int i = 0; i < Index; i++)
2275 Offset += JT[i].MBBs.size();
2276 Offset *= EntrySize;
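    /*
     * e.g. (illustrative numbers): with Index == 2, jump tables 0 and 1
     * holding 3 and 5 entries respectively, and a 4-byte entry size,
     * Offset = (3 + 5) * 4 = 32 bytes past mpJumpTableBase.
     */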
2277
2278 return (uintptr_t)((char *) mpJumpTableBase + Offset);
2279 }
2280
2281 /*
2282 * getMachineBasicBlockAddress - Return the address of the specified
2283 * MachineBasicBlock, only usable after the label for the MBB has been
2284 * emitted.
2285 */
2286 uintptr_t getMachineBasicBlockAddress(llvm::MachineBasicBlock *MBB) const {
2287 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
2288 mMBBLocations[MBB->getNumber()] && "MBB not emitted!");
2289 return mMBBLocations[MBB->getNumber()];
2290 }
2291
2292 /*
2293 * getLabelAddress - Return the address of the specified LabelID,
2294 * only usable after the LabelID has been emitted.
2295 */
2296 uintptr_t getLabelAddress(uint64_t LabelID) const {
2297 assert(mLabelLocations.size() > (unsigned) LabelID &&
2298 mLabelLocations[LabelID] && "Label not emitted!");
2299 return mLabelLocations[LabelID];
2300 }
2301
2302 /*
2303 * Specifies the MachineModuleInfo object.
2304 * This is used for exception handling
2305 * purposes.
2306 */
2307 void setModuleInfo(llvm::MachineModuleInfo* Info) {
2308 mpMMI = Info;
2309 return;
2310 }
2311
2312 void updateFunctionStub(llvm::Function* F) {
2313 /* Get the empty stub we generated earlier. */
2314 void* Stub;
2315 std::set<llvm::Function*>::iterator I = PendingFunctions.find(F);
2316 if(I != PendingFunctions.end())
 2317      Stub = mFunctionToLazyStubMap[F]; /* the lazy stub emitted for F */
2318 else
2319 return;
2320
2321 void* Addr = GetPointerToGlobalIfAvailable(F);
2322
2323 assert(Addr != Stub &&
2324 "Function must have non-stub address to be updated.");
2325
2326 /*
2327 * Tell the target jit info to rewrite the stub at the specified address,
2328 * rather than creating a new one.
2329 */
2330 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
2331 startGVStub(Stub, SL.Size);
2332 mpTJI->emitFunctionStub(F, Addr, *this);
2333 finishGVStub();
2334
2335 Disassembler(F->getNameStr() + " (stub)", (uint8_t*) Stub,
2336 SL.Size, (uintptr_t) Stub);
2337
2338 PendingFunctions.erase(I);
2339
2340 return;
2341 }
2342
2343 /*
 2344   * Once compilation of a translation unit has finished, call this
 2345   * function to recycle the memory that is needed only at compile time
 2346   * and not at runtime.
 2347   *
 2348   * NOTE: Do not call this function until the code-gen passes for a
 2349   * given module have finished.
 2350   * Otherwise, the result is undefined and may crash the system!
2351 */
2352 void releaseUnnecessary() {
2353 mMBBLocations.clear();
2354 mLabelLocations.clear();
2355 //sliao mGlobalAddressMap.clear();
2356 mFunctionToLazyStubMap.clear();
2357 GlobalToIndirectSymMap.clear();
2358 ExternalFnToStubMap.clear();
2359 PendingFunctions.clear();
2360
2361 return;
2362 }
2363
2364 void reset() {
2365 releaseUnnecessary();
2366
2367 mpSymbolLookupFn = NULL;
2368 mpSymbolLookupContext = NULL;
2369
2370 mpTJI = NULL;
2371 mpTD = NULL;
2372
2373 for(EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin();
2374 I != mEmittedFunctions.end();
2375 I++)
2376 if(I->second != NULL)
2377 delete I->second;
2378 mEmittedFunctions.clear();
2379
2380 mpMemMgr->reset();
2381
2382 return;
2383 }
2384
2385 void* lookup(const char* Name) {
2386 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(Name);
2387 if(I == mEmittedFunctions.end())
2388 return NULL;
2389 else
2390 return I->second->Code;
2391 }
2392
2393 void getVarNames(llvm::Module *M,
2394 BCCsizei* actualVarCount,
2395 BCCsizei maxVarCount,
2396 void** vars) {
2397 int cnt = 0;
2398 for (llvm::Module::const_global_iterator c = M->global_begin(),
2399 e = M->global_end(); c != e; ++c) {
2400 llvm::GlobalVariable *g = (const_cast<llvm::GlobalVariable*> (&(*c)));
2401 if (!g->hasInternalLinkage()) {
2402 cnt++;
2403 }
2404 }
2405
2406 if (actualVarCount)
2407 *actualVarCount = cnt;
2408 if (cnt > maxVarCount)
2409 cnt = maxVarCount;
2410 if (!vars)
2411 return;
2412
2413 for (llvm::Module::const_global_iterator c = M->global_begin(),
2414 e = M->global_end();
2415 c != e && cnt > 0;
2416 ++c, --cnt) {
2417 llvm::GlobalVariable *g = (const_cast<llvm::GlobalVariable*> (&(*c)));
2418 if (!g->hasInternalLinkage()) {
2419 // A member function in CodeEmitter
2420 *vars++ = (void*) GetPointerToGlobalIfAvailable(g);
2421 }
2422 }
2423 }
2424
2425 void getFunctionNames(BCCsizei* actualFunctionCount,
2426 BCCsizei maxFunctionCount,
2427 BCCchar** functions) {
2428 int functionCount = mEmittedFunctions.size();
2429
2430 if(actualFunctionCount)
2431 *actualFunctionCount = functionCount;
2432 if(functionCount > maxFunctionCount)
2433 functionCount = maxFunctionCount;
2434 if(functions)
2435 for(EmittedFunctionsMapTy::const_iterator it =
2436 mEmittedFunctions.begin();
2437 functionCount > 0;
2438 functionCount--, it++)
2439 *functions++ = (BCCchar*) it->first.c_str();
2440
2441 return;
2442 }
2443
2444 void getFunctionBinary(BCCchar* label,
2445 BCCvoid** base,
2446 BCCsizei* length) {
2447 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
2448 if(I == mEmittedFunctions.end()) {
2449 *base = NULL;
2450 *length = 0;
2451 } else {
2452 *base = I->second->Code;
2453 *length = I->second->Size;
2454 }
2455 return;
2456 }
2457
2458 ~CodeEmitter() {
2459 if(mpMemMgr)
2460 delete mpMemMgr;
2461 return;
2462 }
2463 /* }}} */
2464 }; /* End of Class CodeEmitter */
2465
2466 /* The CodeEmitter */
2467 llvm::OwningPtr<CodeEmitter> mCodeEmitter;
2468 CodeEmitter* createCodeEmitter() {
2469 mCodeEmitter.reset(new CodeEmitter(mCodeMemMgr.take()));
2470 return mCodeEmitter.get();
2471 }
2472
2473 BCCSymbolLookupFn mpSymbolLookupFn;
2474 void* mpSymbolLookupContext;
2475
2476 llvm::Module* mModule;
2477
2478 bool mTypeInformationPrepared;
2479 std::vector<const llvm::Type*> mTypes;
2480
2481 typedef llvm::StringMap<void*> GlobalVarAddresseTy;
2482 GlobalVarAddresseTy mGlobalVarAddresses;
2483
2484 public:
 2485  Compiler() : mpSymbolLookupFn(NULL), mpSymbolLookupContext(NULL), mModule(NULL), mTypeInformationPrepared(false) {
2486 llvm::llvm_install_error_handler(LLVMErrorHandler, &mError);
2487 return;
2488 }
2489
2490 /* interface for BCCscript::registerSymbolCallback() */
2491 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
2492 mpSymbolLookupFn = pFn;
2493 mpSymbolLookupContext = pContext;
2494 return;
2495 }
2496
2497 int loadModule(const char* bitcode, size_t bitcodeSize) {
2498 llvm::MemoryBuffer* SB = NULL;
2499
 2500    if(bitcode == NULL || bitcodeSize == 0)
2501 return 0;
2502
2503 GlobalInitialization();
2504
2505 /* Package input to object MemoryBuffer */
2506 SB = llvm::MemoryBuffer::getMemBuffer(bitcode, bitcode + bitcodeSize);
2507 if(SB == NULL) {
2508 setError("Error reading input Bitcode into memory");
2509 goto on_bcc_load_module_error;
2510 }
2511
2512 /* Read the input Bitcode as a Module */
2513 mModule = llvm::ParseBitcodeFile(SB, llvm::getGlobalContext(), &mError);
2514
2515on_bcc_load_module_error:
2516 if (SB)
2517 delete SB;
2518
2519 return hasError();
2520 }
2521
 2522  /* interface for bccCompileScript() */
2523 int compile() {
2524 llvm::TargetData* TD = NULL;
2525
2526 llvm::TargetMachine* TM = NULL;
2527 const llvm::Target* Target;
2528 std::string FeaturesStr;
2529
2530 llvm::FunctionPassManager* CodeGenPasses = NULL;
2531 const llvm::NamedMDNode* PragmaMetadata;
2532
2533 if(mModule == NULL) /* No module was loaded */
2534 return 0;
2535
2536 /* Create TargetMachine */
2537 Target = llvm::TargetRegistry::lookupTarget(Triple, mError);
2538 if(hasError())
2539 goto on_bcc_compile_error;
2540
2541 if(!CPU.empty() || !Features.empty()) {
2542 llvm::SubtargetFeatures F;
2543 F.setCPU(CPU);
2544 for(std::vector<std::string>::const_iterator it = Features.begin();
2545 it != Features.end();
2546 it++)
2547 F.AddFeature(*it);
2548 FeaturesStr = F.getString();
2549 }
2550
2551 TM = Target->createTargetMachine(Triple, FeaturesStr);
2552 if(TM == NULL) {
2553 setError("Failed to create target machine implementation for the"
2554 " specified triple '" + Triple + "'");
2555 goto on_bcc_compile_error;
2556 }
2557
2558 /* Create memory manager for creation of code emitter later */
2559 if(!mCodeMemMgr.get() && !createCodeMemoryManager()) {
2560 setError("Failed to startup memory management for further compilation");
2561 goto on_bcc_compile_error;
2562 }
2563
2564 /* Create code emitter */
2565 if(!mCodeEmitter.get()) {
2566 if(!createCodeEmitter()) {
2567 setError("Failed to create machine code emitter to complete"
2568 " the compilation");
2569 goto on_bcc_compile_error;
2570 }
2571 } else {
2572 /* reuse the code emitter */
2573 mCodeEmitter->reset();
2574 }
2575
2576 mCodeEmitter->setTargetMachine(*TM);
2577 mCodeEmitter->registerSymbolCallback(mpSymbolLookupFn,
2578 mpSymbolLookupContext);
2579
2580 /* Get target data from Module */
2581 TD = new llvm::TargetData(mModule);
2582 /* Create code-gen pass to run the code emitter */
2583 CodeGenPasses = new llvm::FunctionPassManager(mModule);
2584 CodeGenPasses->add(TD); // Will take the ownership of TD
2585
2586 if(TM->addPassesToEmitMachineCode(*CodeGenPasses,
2587 *mCodeEmitter, CodeGenOptLevel)) {
2588 setError("The machine code emission is not supported by BCC on target '"
2589 + Triple + "'");
2590 goto on_bcc_compile_error;
2591 }
2592
2593 /*
2594 * Run the pass (the code emitter) on every non-declaration function
2595 * in the module
2596 */
2597 CodeGenPasses->doInitialization();
2598 for(llvm::Module::iterator I = mModule->begin();
2599 I != mModule->end();
2600 I++)
2601 if(!I->isDeclaration())
2602 CodeGenPasses->run(*I);
2603
2604 CodeGenPasses->doFinalization();
2605
 2606    /* Copy the global-variable address mapping out of the code emitter */
2607 for(CodeEmitter::global_addresses_const_iterator I =
2608 mCodeEmitter->global_address_begin();
2609 I != mCodeEmitter->global_address_end();
2610 I++)
2611 {
2612 if(I->first->getValueID() != llvm::Value::GlobalVariableVal)
2613 continue;
2614 llvm::StringRef GlobalVarName = I->first->getName();
2615 GlobalVarAddresseTy::value_type* V =
2616 GlobalVarAddresseTy::value_type::Create(
2617 GlobalVarName.begin(),
2618 GlobalVarName.end(),
2619 mGlobalVarAddresses.getAllocator(),
2620 I->second
2621 );
2622 bool ret = mGlobalVarAddresses.insert(V);
2623 assert(ret && "The global variable name should be unique over the module");
2624 }
2625
2626 /*
 2627     * Tell the code emitter that it can now release the memory used only
 2628     * during code emission, since emission is done
2629 */
2630 mCodeEmitter->releaseUnnecessary();
2631
2632 /*
2633 * Finally, read pragma information from the metadata node
2634 * of the @Module if any
2635 */
2636 PragmaMetadata = mModule->getNamedMetadata(PragmaMetadataName);
2637 if(PragmaMetadata)
 2638      for(unsigned int i = 0; i < PragmaMetadata->getNumOperands(); i++) {
2639 llvm::MDNode* Pragma = PragmaMetadata->getOperand(i);
2640 if(Pragma != NULL &&
2641 Pragma->getNumOperands() == 2 /* should have exactly 2 operands */) {
2642 llvm::Value* PragmaNameMDS = Pragma->getOperand(0);
2643 llvm::Value* PragmaValueMDS = Pragma->getOperand(1);
2644
2645 if((PragmaNameMDS->getValueID() == llvm::Value::MDStringVal) &&
2646 (PragmaValueMDS->getValueID() == llvm::Value::MDStringVal)) {
2647 llvm::StringRef PragmaName =
2648 static_cast<llvm::MDString*>(PragmaNameMDS)->getString();
2649 llvm::StringRef PragmaValue =
2650 static_cast<llvm::MDString*>(PragmaValueMDS)->getString();
2651
2652 mPragmas.push_back( make_pair( std::string(PragmaName.data(),
2653 PragmaName.size()),
2654 std::string(PragmaValue.data(),
2655 PragmaValue.size())
2656 )
2657 );
2658 }
2659 }
2660 }
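    /*
     * For reference, the "#pragma" named metadata emitted by the front-end
     * looks roughly like this in textual LLVM IR (the pragma names and
     * values below are made up for illustration):
     *
     *   !0 = metadata !{metadata !"version", metadata !"1"}
     *   !1 = metadata !{metadata !"stateVertex", metadata !"default"}
     *   !#pragma = !{!0, !1}
     *
     * i.e. each operand of the named node is an MDNode whose two MDString
     * operands are a pragma's name and value, which is exactly what the
     * loop above extracts.
     */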
2661
2662 on_bcc_compile_error:
2663 if (CodeGenPasses) {
2664 delete CodeGenPasses;
2665 } else if (TD) {
2666 delete TD;
2667 }
2668 if (TM)
2669 delete TM;
2670
2671 return hasError();
2672 }
2673
2674 /* interface for bccGetScriptInfoLog() */
2675 char* getErrorMessage() {
2676 return const_cast<char*>(mError.c_str());
2677 }
2678
2679 /* interface for bccGetScriptLabel() */
2680 void* lookup(const char* name) {
2681 void* addr = NULL;
2682 if(mCodeEmitter.get()) {
2683 /* Find function pointer */
2684 addr = mCodeEmitter->lookup(name);
2685 if(addr == NULL) {
2686 /*
2687 * No function labeled with given name.
2688 * Try searching the global variables.
2689 */
2690 GlobalVarAddresseTy::const_iterator I = mGlobalVarAddresses.find(name);
2691 if(I != mGlobalVarAddresses.end())
2692 addr = I->getValue();
2693 }
2694 }
2695 return addr;
2696 }
2697
2698 /* Interface for bccGetPragmas() */
2699 void getPragmas(BCCsizei* actualStringCount,
2700 BCCsizei maxStringCount,
2701 BCCchar** strings) {
2702 int stringCount = mPragmas.size() * 2;
2703
2704 if(actualStringCount)
2705 *actualStringCount = stringCount;
2706 if(stringCount > maxStringCount)
2707 stringCount = maxStringCount;
2708 if(strings)
2709 for(PragmaList::const_iterator it = mPragmas.begin();
2710 stringCount > 0;
2711 stringCount-=2, it++)
2712 {
2713 *strings++ = (BCCchar*) it->first.c_str();
2714 *strings++ = (BCCchar*) it->second.c_str();
2715 }
2716
2717 return;
2718 }
2719
2720 /* Interface for bccGetVars() */
2721 void getVars(BCCsizei* actualVarCount,
2722 BCCsizei maxVarCount,
2723 void** vars) {
2724 if(mCodeEmitter.get())
2725 mCodeEmitter->getVarNames(mModule,
2726 actualVarCount,
2727 maxVarCount,
2728 vars);
 2729    else if(actualVarCount)
 2730      *actualVarCount = 0;
2731
2732 return;
2733 }
2734
2735 /* Interface for bccGetFunctions() */
2736 void getFunctions(BCCsizei* actualFunctionCount,
2737 BCCsizei maxFunctionCount,
2738 BCCchar** functions) {
2739 if(mCodeEmitter.get())
2740 mCodeEmitter->getFunctionNames(actualFunctionCount,
2741 maxFunctionCount,
2742 functions);
 2743    else if(actualFunctionCount)
 2744      *actualFunctionCount = 0;
2745
2746 return;
2747 }
2748
2749 /* Interface for bccGetFunctionBinary() */
2750 void getFunctionBinary(BCCchar* function,
2751 BCCvoid** base,
2752 BCCsizei* length) {
2753 if(mCodeEmitter.get()) {
2754 mCodeEmitter->getFunctionBinary(function, base, length);
2755 } else {
2756 *base = NULL;
2757 *length = 0;
2758 }
2759 return;
2760 }
2761
2762 inline const llvm::Module* getModule() const {
2763 return mModule;
2764 }
2765
2766 inline const std::vector<const llvm::Type*>& getTypes() const {
2767 return mTypes;
2768 }
2769
2770 ~Compiler() {
2771 delete mModule;
2772 llvm::llvm_shutdown();
2773 return;
2774 }
2775}; /* End of Class Compiler */
2776
2777bool Compiler::GlobalInitialized = false;
2778
2779/* Code generation optimization level for the compiler */
2780llvm::CodeGenOpt::Level Compiler::CodeGenOptLevel;
2781
2782std::string Compiler::Triple;
2783
2784std::string Compiler::CPU;
2785
2786std::vector<std::string> Compiler::Features;
2787
2788/*
 2789 * The name of the metadata node in which pragmas reside
 2790 * (should be kept in sync with slang.cpp)
2791 */
2792const llvm::StringRef Compiler::PragmaMetadataName = "#pragma";
2793
2794struct BCCscript {
2795 /*
2796 * Part I. Compiler
2797 */
2798
2799 Compiler compiler;
2800
2801 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
2802 compiler.registerSymbolCallback(pFn, pContext);
2803 }
2804
2805 /*
2806 * Part II. Logistics & Error handling
2807 */
2808
2809 BCCscript() {
2810 bccError = BCC_NO_ERROR;
2811 }
2812
2813 ~BCCscript() {
2814 }
2815
2816 void setError(BCCenum error) {
2817 if (bccError == BCC_NO_ERROR && error != BCC_NO_ERROR) {
2818 bccError = error;
2819 }
2820 }
2821
2822 BCCenum getError() {
2823 BCCenum result = bccError;
2824 bccError = BCC_NO_ERROR;
2825 return result;
2826 }
2827
2828 BCCenum bccError;
2829};
2830
2831
2832extern "C"
2833BCCscript* bccCreateScript()
2834{
2835 return new BCCscript();
2836}
2837
2838extern "C"
2839BCCenum bccGetError( BCCscript* script )
2840{
2841 return script->getError();
2842}
2843
2844extern "C"
2845void bccDeleteScript(BCCscript* script) {
2846 delete script;
2847}
2848
2849extern "C"
2850void bccRegisterSymbolCallback(BCCscript* script,
2851 BCCSymbolLookupFn pFn,
2852 BCCvoid* pContext)
2853{
2854 script->registerSymbolCallback(pFn, pContext);
2855}
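
/*
 * Example (not part of the library): a host application can register a
 * symbol-lookup callback so that external symbols referenced by the bitcode
 * are resolved against its own table. The names below (lookupSymbol,
 * hostLog) are hypothetical; the callback shape follows how it is invoked in
 * GetPointerToNamedSymbol above: it receives the registered context and a
 * symbol name, and returns the symbol's address or NULL.
 *
 *   static BCCvoid* lookupSymbol(BCCvoid* pContext, const BCCchar* name) {
 *     if (strcmp(name, "hostLog") == 0)
 *       return (BCCvoid*) &hostLog;   // a function provided by the host
 *     return NULL;  // unresolved names are reported by the compiler
 *   }
 *
 *   bccRegisterSymbolCallback(script, lookupSymbol, NULL);
 */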
2856
2857extern "C"
2858void bccScriptBitcode(BCCscript* script,
2859 const BCCchar* bitcode,
2860 BCCint size)
2861{
2862 script->compiler.loadModule(bitcode, size);
2863}
2864
2865extern "C"
2866void bccCompileScript(BCCscript* script)
2867{
2868 int result = script->compiler.compile();
2869 if (result)
2870 script->setError(BCC_INVALID_OPERATION);
2871}
2872
2873extern "C"
2874void bccGetScriptInfoLog(BCCscript* script,
2875 BCCsizei maxLength,
2876 BCCsizei* length,
2877 BCCchar* infoLog)
2878{
2879 char* message = script->compiler.getErrorMessage();
2880 int messageLength = strlen(message) + 1;
2881 if (length)
2882 *length = messageLength;
2883
2884 if (infoLog && maxLength > 0) {
 2885    int trimmedLength = messageLength < maxLength ? messageLength : maxLength - 1;
2886 memcpy(infoLog, message, trimmedLength);
2887 infoLog[trimmedLength] = 0;
2888 }
2889}
2890
2891extern "C"
2892void bccGetScriptLabel(BCCscript* script,
2893 const BCCchar * name,
2894 BCCvoid ** address)
2895{
2896 void* value = script->compiler.lookup(name);
2897 if (value)
2898 *address = value;
2899 else
2900 script->setError(BCC_INVALID_VALUE);
2901}
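
/*
 * Typical client usage of the C API defined in this file (a sketch only;
 * the bitcode buffer, the "root" symbol name and the error handling are
 * application-specific examples, not requirements of the API):
 *
 *   BCCscript* script = bccCreateScript();
 *   bccRegisterSymbolCallback(script, lookupSymbol, NULL);
 *   bccScriptBitcode(script, bitcode, bitcodeSize);
 *   bccCompileScript(script);
 *   if (bccGetError(script) != BCC_NO_ERROR) {
 *     char log[512];
 *     bccGetScriptInfoLog(script, sizeof(log), NULL, log);
 *     // report the log ...
 *   } else {
 *     BCCvoid* entry = NULL;
 *     bccGetScriptLabel(script, "root", &entry);
 *     // cast entry to the expected function type and call it
 *   }
 *   bccDeleteScript(script);
 */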
2902
2903extern "C"
2904void bccGetPragmas(BCCscript* script,
2905 BCCsizei* actualStringCount,
2906 BCCsizei maxStringCount,
2907 BCCchar** strings)
2908{
2909 script->compiler.getPragmas(actualStringCount, maxStringCount, strings);
2910}
2911
2912extern "C"
2913void bccGetVars(BCCscript* script,
2914 BCCsizei* actualVarCount,
2915 BCCsizei maxVarCount,
2916 void** vars)
2917{
2918 script->compiler.getVars(actualVarCount,
2919 maxVarCount,
2920 vars);
2921}
2922
2923extern "C"
2924void bccGetFunctions(BCCscript* script,
2925 BCCsizei* actualFunctionCount,
2926 BCCsizei maxFunctionCount,
2927 BCCchar** functions)
2928{
2929 script->compiler.getFunctions(actualFunctionCount,
2930 maxFunctionCount,
2931 functions);
2932}
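
/*
 * The (actualCount, maxCount, array) triples used by bccGetPragmas,
 * bccGetVars and bccGetFunctions follow a two-call pattern: query the count
 * first, then allocate and fetch. A sketch (error handling omitted):
 *
 *   BCCsizei count = 0;
 *   bccGetFunctions(script, &count, 0, NULL);
 *   BCCchar** names = (BCCchar**) malloc(count * sizeof(BCCchar*));
 *   bccGetFunctions(script, NULL, count, names);
 *   // names[0 .. count-1] point at strings owned by the compiler object
 *   free(names);
 */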
2933
2934extern "C"
2935void bccGetFunctionBinary(BCCscript* script,
2936 BCCchar* function,
2937 BCCvoid** base,
2938 BCCsizei* length)
2939{
2940 script->compiler.getFunctionBinary(function, base, length);
2941}
2942
2943struct BCCtype {
2944 const Compiler* compiler;
2945 const llvm::Type* t;
2946};
2947
2948} /* End of namespace bcc */