1/*
2 * Bitcode compiler (bcc) for Android:
3 * This is an eager-compilation JIT running on Android.
4 *
5 */
6
7#define LOG_TAG "bcc"
8#include <cutils/log.h>
9
10#include <ctype.h>
11#include <errno.h>
12#include <limits.h>
13#include <stdarg.h>
14#include <stdint.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
18#include <unistd.h>
19
20#include <cutils/hashmap.h>
21
22#if defined(__i386__)
23#include <sys/mman.h>
24#endif
25
26#if defined(__arm__)
27 #define DEFAULT_ARM_CODEGEN
28 #define PROVIDE_ARM_CODEGEN
29#elif defined(__i386__)
30 #define DEFAULT_X86_CODEGEN
31 #define PROVIDE_X86_CODEGEN
32#elif defined(__x86_64__)
33 #define DEFAULT_X64_CODEGEN
34 #define PROVIDE_X64_CODEGEN
35#endif
36
37#if defined(FORCE_ARM_CODEGEN)
38 #define DEFAULT_ARM_CODEGEN
39 #undef DEFAULT_X86_CODEGEN
40 #undef DEFAULT_X64_CODEGEN
41 #define PROVIDE_ARM_CODEGEN
42 #undef PROVIDE_X86_CODEGEN
43 #undef PROVIDE_X64_CODEGEN
44#elif defined(FORCE_X86_CODEGEN)
45 #undef DEFAULT_ARM_CODEGEN
46 #define DEFAULT_X86_CODEGEN
47 #undef DEFAULT_X64_CODEGEN
48 #undef PROVIDE_ARM_CODEGEN
49 #define PROVIDE_X86_CODEGEN
50 #undef PROVIDE_X64_CODEGEN
51#elif defined(FORCE_X64_CODEGEN)
52 #undef DEFAULT_ARM_CODEGEN
53 #undef DEFAULT_X86_CODEGEN
54 #define DEFAULT_X64_CODEGEN
55 #undef PROVIDE_ARM_CODEGEN
56 #undef PROVIDE_X86_CODEGEN
57 #define PROVIDE_X64_CODEGEN
58#endif
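/*
 * Note on the macros above (descriptive comment): DEFAULT_*_CODEGEN picks
 * the target triple used below, PROVIDE_*_CODEGEN controls which LLVM
 * targets get initialized in GlobalInitialization(), and a FORCE_*_CODEGEN
 * macro (presumably supplied by the build, e.g. -DFORCE_ARM_CODEGEN in the
 * compiler flags) overrides both so that exactly one target is provided.
 */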
59
60#if defined(DEFAULT_ARM_CODEGEN)
61 #define TARGET_TRIPLE_STRING "armv7-none-linux-gnueabi"
62#elif defined(DEFAULT_X86_CODEGEN)
63 #define TARGET_TRIPLE_STRING "i686-unknown-linux"
64#elif defined(DEFAULT_X64_CODEGEN)
65 #define TARGET_TRIPLE_STRING "x86_64-unknown-linux"
66#endif
67
68#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
69#define ARM_USE_VFP
70#endif
71
72#include <bcc/bcc.h>
73#include "bcc_runtime.h"
74
75#define LOG_API(...) do {} while(0)
76// #define LOG_API(...) fprintf (stderr, __VA_ARGS__)
77
78#define LOG_STACK(...) do {} while(0)
79// #define LOG_STACK(...) fprintf (stderr, __VA_ARGS__)
80
81// #define PROVIDE_TRACE_CODEGEN
82
83#if defined(USE_DISASSEMBLER)
84# include "disassembler/dis-asm.h"
85# include <cstdio>
86#endif
87
88#include <set>
89#include <map>
90#include <list>
91#include <cmath>
92#include <string>
93#include <cstring>
94#include <algorithm> /* for std::reverse */
95
96// Basic
97#include "llvm/Use.h" /* for class llvm::Use */
98#include "llvm/User.h" /* for class llvm::User */
99#include "llvm/Module.h" /* for class llvm::Module */
100#include "llvm/Function.h" /* for class llvm::Function */
101#include "llvm/Constant.h" /* for class llvm::Constant */
102#include "llvm/Constants.h" /* for class llvm::ConstantExpr */
103#include "llvm/Instruction.h" /* for class llvm::Instruction */
104#include "llvm/PassManager.h" /* for class llvm::PassManager and
105 * llvm::FunctionPassManager
106 */
107#include "llvm/LLVMContext.h" /* for llvm::getGlobalContext() */
108#include "llvm/GlobalValue.h" /* for class llvm::GlobalValue */
109#include "llvm/Instructions.h" /* for class llvm::CallInst */
110#include "llvm/OperandTraits.h" /* for macro
111 * DECLARE_TRANSPARENT_OPERAND_ACCESSORS
112 * and macro
113 * DEFINE_TRANSPARENT_OPERAND_ACCESSORS
114 */
115#include "llvm/TypeSymbolTable.h" /* for Type Reflection */
116
117// System
118#include "llvm/System/Host.h" /* for function
119 * llvm::sys::isLittleEndianHost()
120 */
121#include "llvm/System/Memory.h" /* for class llvm::sys::MemoryBlock */
122
123// ADT
124#include "llvm/ADT/APInt.h" /* for class llvm::APInt */
125#include "llvm/ADT/APFloat.h" /* for class llvm::APFloat */
126#include "llvm/ADT/DenseMap.h" /* for class llvm::DenseMap */
127#include "llvm/ADT/ValueMap.h" /* for class llvm::ValueMap and
128 * class llvm::ValueMapConfig
129 */
130#include "llvm/ADT/StringMap.h" /* for class llvm::StringMap */
131#include "llvm/ADT/OwningPtr.h" /* for class llvm::OwningPtr */
132#include "llvm/ADT/SmallString.h" /* for class llvm::SmallString */
133
134// Target
135#include "llvm/Target/TargetData.h" /* for class llvm::TargetData */
136#include "llvm/Target/TargetSelect.h" /* for function
137 * LLVMInitialize[ARM|X86]
138 * [TargetInfo|Target]()
139 */
140#include "llvm/Target/TargetOptions.h" /* for
141 * variable bool llvm::UseSoftFloat
142 * FloatABI::ABIType llvm::FloatABIType
143 * bool llvm::NoZerosInBSS
144 */
145#include "llvm/Target/TargetMachine.h" /* for class llvm::TargetMachine and
146 * llvm::TargetMachine::AssemblyFile
147 */
148#include "llvm/Target/TargetJITInfo.h" /* for class llvm::TargetJITInfo */
149#include "llvm/Target/TargetRegistry.h" /* for class llvm::TargetRegistry */
150#include "llvm/Target/SubtargetFeature.h"
151 /* for class llvm::SubtargetFeature */
152
153// Support
154#include "llvm/Support/Casting.h" /* for class cast<> */
155#include "llvm/Support/raw_ostream.h" /* for class llvm::raw_ostream and
156 * llvm::raw_string_ostream
157 */
158#include "llvm/Support/ValueHandle.h" /* for class AssertingVH<> */
159#include "llvm/Support/MemoryBuffer.h" /* for class llvm::MemoryBuffer */
160#include "llvm/Support/ManagedStatic.h" /* for class llvm::llvm_shutdown */
161#include "llvm/Support/ErrorHandling.h" /* for function
162 * llvm::llvm_install_error_handler()
163 * and macro llvm_unreachable()
164 */
165#include "llvm/Support/StandardPasses.h"/* for function
166 * llvm::createStandardFunctionPasses()
167 * and
168 * llvm::createStandardModulePasses()
169 */
170#include "llvm/Support/FormattedStream.h"
171 /* for
172 * class llvm::formatted_raw_ostream
173 * llvm::formatted_raw_ostream::
174 * PRESERVE_STREAM
175 * llvm::FileModel::Error
176 */
177
178// Bitcode
179#include "llvm/Bitcode/ReaderWriter.h" /* for function
180 * llvm::ParseBitcodeFile()
181 */
182
183// CodeGen
184#include "llvm/CodeGen/Passes.h" /* for
185 * llvm::createLocalRegisterAllocator()
186 * and
187 * llvm::
188 * createLinearScanRegisterAllocator()
189 */
190#include "llvm/CodeGen/JITCodeEmitter.h"/* for class llvm::JITCodeEmitter */
191#include "llvm/CodeGen/MachineFunction.h"
192 /* for class llvm::MachineFunction */
193#include "llvm/CodeGen/RegAllocRegistry.h"
194 /* for class llvm::RegisterRegAlloc */
195#include "llvm/CodeGen/SchedulerRegistry.h"
196 /* for class llvm::RegisterScheduler
197 * and llvm::createDefaultScheduler()
198 */
199#include "llvm/CodeGen/MachineRelocation.h"
200 /* for class llvm::MachineRelocation */
201#include "llvm/CodeGen/MachineModuleInfo.h"
202 /* for class llvm::MachineModuleInfo */
203#include "llvm/CodeGen/MachineCodeEmitter.h"
204 /* for class llvm::MachineCodeEmitter */
205#include "llvm/CodeGen/MachineConstantPool.h"
206 /* for class llvm::MachineConstantPool
207 */
208#include "llvm/CodeGen/MachineJumpTableInfo.h"
209 /* for class llvm::MachineJumpTableInfo
210 */
211
212// ExecutionEngine
213#include "llvm/ExecutionEngine/GenericValue.h"
214 /* for struct llvm::GenericValue */
215#include "llvm/ExecutionEngine/JITMemoryManager.h"
216 /* for class llvm::JITMemoryManager */
217
218
219/*
220 * Compilation class that suits Android's needs.
221 * (Support: no argument passed, ...)
222 */
223
224namespace bcc {
225
226class Compiler {
227 /*
228 * This part is designed to be orthogonal to those exported bcc*() functions
229 * implementation and internal struct BCCscript.
230 */
231
232
233 /*********************************************
234 * The variable section below (e.g., Triple, CodeGenOptLevel)
235 * is initialized in GlobalInitialization()
236 */
237 static bool GlobalInitialized;
238
239 /*
240 * If given, this will be the name of the target triple to compile for.
241 * If not given, the initial values defined in this file will be used.
242 */
243 static std::string Triple;
244
245 static llvm::CodeGenOpt::Level CodeGenOptLevel;
246 /*
247 * End of section of GlobalInitializing variables
248 **********************************************/
249
250 /* If given, the name of the target CPU to generate code for. */
251 static std::string CPU;
252
253 /*
254 * The list of target specific features to enable or disable -- this should
255 * be a list of strings starting with '+' (enable) or '-' (disable).
256 */
257 static std::vector<std::string> Features;
258
259 struct Runtime {
260 const char* mName;
261 void* mPtr;
262 };
263 static struct Runtime Runtimes[];
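  /*
   * Illustrative sketch only (the real table lives with bcc_runtime, not
   * here): each entry is expected to pair an exported symbol name with the
   * address of its host implementation, e.g.
   *
   *   { "sinf", (void*) &sinf },
   *
   * so that FindRuntimeFunction() can resolve script externals by name.
   */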
264
265 static void GlobalInitialization() {
266 if(GlobalInitialized) return;
267
268 /* Set Triple, CPU and Features here */
269 Triple = TARGET_TRIPLE_STRING;
270
271#if defined(DEFAULT_ARM_CODEGEN) || defined(PROVIDE_ARM_CODEGEN)
272 LLVMInitializeARMTargetInfo();
273 LLVMInitializeARMTarget();
274#endif
275
276#if defined(DEFAULT_X86_CODEGEN) || defined(PROVIDE_X86_CODEGEN)
277 LLVMInitializeX86TargetInfo();
278 LLVMInitializeX86Target();
279#endif
280
281#if defined(DEFAULT_X64_CODEGEN) || defined(PROVIDE_X64_CODEGEN)
282 LLVMInitializeX86TargetInfo();
283 LLVMInitializeX86Target();
284#endif
285
286 /*
287 * -O0: llvm::CodeGenOpt::None
288 * -O1: llvm::CodeGenOpt::Less
289 * -O2: llvm::CodeGenOpt::Default
290 * -O3: llvm::CodeGenOpt::Aggressive
291 */
292 CodeGenOptLevel = llvm::CodeGenOpt::Aggressive;
293
294 /* Below are the global settings to LLVM */
295
296 /* Allow frame pointer elimination (i.e., do not force keeping frame pointers) */
297 llvm::NoFramePointerElim = false;
298
299 /*
300 * Use hardfloat ABI
301 *
302 * FIXME: Need to detect the CPU capability and decide whether to use
303 * softfp. To use softfp, change the following two lines to
304 *
305 * llvm::FloatABIType = llvm::FloatABI::Soft;
306 * llvm::UseSoftFloat = true;
307 */
308 llvm::FloatABIType = llvm::FloatABI::Hard;
309 llvm::UseSoftFloat = false;
310
311 /*
312 * BCC needs all unknown symbols resolved at JIT/compilation time.
313 * So we don't need any dynamic relocation model.
314 */
315 llvm::TargetMachine::setRelocationModel(llvm::Reloc::Static);
316
317#ifdef DEFAULT_X64_CODEGEN
318 /* Data addresses on the x86-64 architecture may reside far away from the code */
319 llvm::TargetMachine::setCodeModel(llvm::CodeModel::Medium);
320#else
321 /*
322 * This is set for the linker (it specifies how large a virtual address
323 * range we can reach for all unknown symbols).
324 */
325 llvm::TargetMachine::setCodeModel(llvm::CodeModel::Small);
326#endif
327
328 /* Register the scheduler */
329 llvm::RegisterScheduler::setDefault(llvm::createDefaultScheduler);
330
331 /*
332 * Register allocation policy:
333 * createLocalRegisterAllocator: fast but bad quality
334 * createLinearScanRegisterAllocator: not so fast but good quality
335 */
336 llvm::RegisterRegAlloc::setDefault
337 ((CodeGenOptLevel == llvm::CodeGenOpt::None) ?
338 llvm::createLocalRegisterAllocator :
339 llvm::createLinearScanRegisterAllocator);
340
341 GlobalInitialized = true;
342 return;
343 }
344
345 static void LLVMErrorHandler(void *UserData, const std::string &Message) {
346 // std::string* Error = static_cast<std::string*>(UserData);
347 // Error->assign(Message);
348 // return;
349 fprintf(stderr, "%s\n", Message.c_str());
350 exit(1);
351 }
352
353 static const llvm::StringRef PragmaMetadataName;
354
355 private:
356 std::string mError;
357
358 inline bool hasError() const {
359 return !mError.empty();
360 }
361 inline void setError(const char* Error) {
362 mError.assign(Error); // Copying
363 return;
364 }
365 inline void setError(const std::string& Error) {
366 mError = Error;
367 return;
368 }
369
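  /*
   * Pragmas harvested from the script are stored as (name, value) string
   * pairs; for example (an assumption about the front-end, not shown in
   * this file) "#pragma version(1)" would arrive as the pair
   * ("version", "1").
   */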
370 typedef std::list< std::pair<std::string, std::string> > PragmaList;
371 PragmaList mPragmas;
372
373 /* Memory manager for the code residing in memory */
374 /*
375 * The memory for our code emitter is very simple and conforms to the
376 * design decisions of Android RenderScript's Execution Environment:
377 * the code, data, and symbol sizes are limited (currently 100 KiB).
378 *
379 * This is very different from a typical compiler, which has no limit
380 * on the code size. How does the code emitter know the size of the code
381 * it is about to emit? It does not know beforehand. We want to solve
382 * this without complicating the code emitter too much.
383 *
384 * We solve this by pre-allocating a certain amount of memory,
385 * and then starting the code emission. Once the buffer overflows, the
386 * emitter simply discards all subsequent emission but still keeps a
387 * counter of how many bytes have been emitted.
388 *
389 * So once the whole emission is done, if there was a buffer overflow,
390 * it re-allocates the buffer with enough size (based on the
391 * counter from the previous emission) and re-emits.
392 */
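  /*
   * A minimal sketch of the retry flow described above (illustrative only;
   * startFunctionBody()/endFunctionBody() below are the real hooks, the
   * driver loop itself is paraphrased):
   *
   *   startFunctionBody(F, ActualSize);   // hands out whatever is left
   *   ... emit instructions, still counting bytes after an overflow ...
   *   if (bytes emitted > ActualSize)     // buffer overflowed
   *     re-allocate using the byte counter and re-emit F;
   *   else
   *     endFunctionBody(F, Start, End);   // commit the function
   */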
393 class CodeMemoryManager : public llvm::JITMemoryManager {
394 /* {{{ */
395 private:
396 static const unsigned int MaxCodeSize = 100 * 1024; /* 100 KiB for code */
397 static const unsigned int MaxGOTSize = 1 * 1024; /* 1 KiB for global
398 offset table (GOT) */
399
400 /*
401 * Our memory layout is as follows:
402 *
403 * The direction of arrows (-> and <-) shows memory's growth direction
404 * when more space is needed.
405 *
406 * @mpCodeMem:
407 * +--------------------------------------------------------------+
408 * | Function Memory ... -> <- ... Global/Stub/GOT |
409 * +--------------------------------------------------------------+
410 * |<------------------ Total: @MaxCodeSize KiB ----------------->|
411 *
412 * Where size of GOT is @MaxGOTSize KiB.
413 *
414 * @mCurFuncMemIdx: The current index (starting from 0) of the last byte
415 * of function code's memory usage
416 * @mCurGSGMemIdx: The current index (starting from 0) of the last byte
417 * of Global Stub/GOT's memory usage
418 *
419 */
420
421 intptr_t mCurFuncMemIdx;
422 intptr_t mCurGSGMemIdx;
423 llvm::sys::MemoryBlock* mpCodeMem;
424
425 /* GOT Base */
426 uint8_t* mpGOTBase;
427
428 typedef std::map<const llvm::Function*, std::pair<void* /* start address */,
429 void* /* end address */>
430 > FunctionMapTy;
431 FunctionMapTy mFunctionMap;
432
433 inline intptr_t getFreeMemSize() const {
434 return mCurGSGMemIdx - mCurFuncMemIdx;
435 }
436 inline uint8_t* getCodeMemBase() const {
437 return static_cast<uint8_t*>(mpCodeMem->base());
438 }
439
440 uint8_t* allocateGSGMemory(uintptr_t Size,
441 unsigned Alignment = 1 /* no alignment */)
442 {
443 if(getFreeMemSize() < Size)
444 /* The code size exceeds our limit */
445 return NULL;
446
447 if(Alignment == 0)
448 Alignment = 1;
449
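      /*
       * Global/stub/GOT data grows downward from the top of the buffer:
       * take @Size bytes off the top and round the start address down to
       * the requested alignment (for example, with Alignment == 8 an
       * address ending in 0x1f is rounded down to end in 0x18).
       */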
450 uint8_t* result = getCodeMemBase() + mCurGSGMemIdx - Size;
451 result = (uint8_t*) (((intptr_t) result) & ~(intptr_t) (Alignment - 1));
452
453 mCurGSGMemIdx = result - getCodeMemBase();
454
455 return result;
456 }
457
458 public:
459 CodeMemoryManager() : mpCodeMem(NULL), mpGOTBase(NULL) {
460 reset();
461 std::string ErrMsg;
462 llvm::sys::MemoryBlock B = llvm::sys::Memory::
463 AllocateRWX(MaxCodeSize, NULL, &ErrMsg);
464 if(B.base() == 0)
465 llvm::llvm_report_error(
466 "Failed to allocate Memory for code emitter\n" + ErrMsg
467 );
468 mpCodeMem = new llvm::sys::MemoryBlock(B.base(), B.size());
469
470 return;
471 }
472
473 /*
474 * setMemoryWritable - When code generation is in progress,
475 * the code pages may need permissions changed.
476 */
477 void setMemoryWritable() {
478 llvm::sys::Memory::setWritable(*mpCodeMem);
479 return;
480 }
481
482 /*
483 * setMemoryExecutable - When code generation is done and we're ready to
484 * start execution, the code pages may need permissions changed.
485 */
486 void setMemoryExecutable() {
487 llvm::sys::Memory::setExecutable(*mpCodeMem);
488 return;
489 }
490
491 /*
492 * setPoisonMemory - Setting this flag to true makes the memory manager
493 * write garbage values over freed memory. This is useful for testing and
494 * debugging, and should be turned on by default in debug builds.
495 */
496 void setPoisonMemory(bool poison) {
497 /* no effect */
498 return;
499 }
500
501 /* Global Offset Table Management */
502
503 /*
504 * AllocateGOT - If the current table requires a Global Offset Table, this
505 * method is invoked to allocate it. This method is required to set HasGOT
506 * to true.
507 */
508 void AllocateGOT() {
509 assert(mpGOTBase == NULL && "Cannot allocate the GOT multiple times");
510 mpGOTBase = allocateGSGMemory(MaxGOTSize);
511 HasGOT = true;
512 return;
513 }
514
515 /*
516 * getGOTBase - If this is managing a Global Offset Table, this method
517 * should return a pointer to its base.
518 */
519 uint8_t* getGOTBase() const {
520 return mpGOTBase;
521 }
522
523 /* Main Allocation Functions */
524
525 /*
526 * startFunctionBody - When we start JITing a function, the JIT calls this
527 * method to allocate a block of free RWX memory, which returns a pointer to
528 * it. If the JIT wants to request a block of memory of at least a certain
529 * size, it passes that value as ActualSize, and this method returns a block
530 * with at least that much space. If the JIT doesn't know ahead of time how
531 * much space it will need to emit the function, it passes 0 for the
532 * ActualSize. In either case, this method is required to pass back the size
533 * of the allocated block through ActualSize. The JIT will be careful to
534 * not write more than the returned ActualSize bytes of memory.
535 */
536 uint8_t* startFunctionBody(const llvm::Function *F, uintptr_t &ActualSize) {
537 if(getFreeMemSize() < ActualSize)
538 /* The code size exceeds our limit */
539 return NULL;
540
541 ActualSize = getFreeMemSize();
542 return (getCodeMemBase() + mCurFuncMemIdx);
543 }
544
545 /*
546 * allocateStub - This method is called by the JIT to allocate space for a
547 * function stub (used to handle limited branch displacements) while it is
548 * JIT compiling a function. For example, if foo calls bar, and if bar
549 * either needs to be lazily compiled or is a native function that exists
550 * too far away from the call site to work, this method will be used
551 * to make a thunk for it. The stub should be "close" to the current
552 * function body, but should not be included in the 'actualsize'
553 * returned by
554 * startFunctionBody.
555 */
556 uint8_t* allocateStub(const llvm::GlobalValue* F, unsigned StubSize,
557 unsigned Alignment) {
558 return allocateGSGMemory(StubSize, Alignment);
559 }
560
561 /*
562 * endFunctionBody - This method is called when the JIT is done codegen'ing
563 * the specified function. At this point we know the size of the JIT
564 * compiled function. This passes in FunctionStart (which was returned by
565 * the startFunctionBody method) and FunctionEnd which is a pointer to the
566 * actual end of the function. This method should mark the space allocated
567 * and remember where it is in case the client wants to deallocate it.
568 */
569 void endFunctionBody(const llvm::Function* F, uint8_t* FunctionStart,
570 uint8_t* FunctionEnd) {
571 assert(FunctionEnd > FunctionStart);
572 assert(FunctionStart == (getCodeMemBase() + mCurFuncMemIdx) &&
573 "Mismatched function start/end!");
574
575 /* Advance the pointer */
576 intptr_t FunctionCodeSize = FunctionEnd - FunctionStart;
577 assert(FunctionCodeSize <= getFreeMemSize() &&
578 "Code size excess the limitation!");
579 mCurFuncMemIdx += FunctionCodeSize;
580
581 /* Record there's a function in our memory starting at @FunctionStart */
582 assert(mFunctionMap.find(F) == mFunctionMap.end() &&
583 "Function already emitted!");
584 mFunctionMap.insert( std::make_pair<const llvm::Function*, std::pair<void*, void*>
585 >(F, std::make_pair(FunctionStart, FunctionEnd))
586 );
587
588 return;
589 }
590
591 /*
592 * allocateSpace - Allocate a (function code) memory block of the
593 * given size. This method cannot be called between
594 * calls to startFunctionBody and endFunctionBody.
595 */
596 uint8_t* allocateSpace(intptr_t Size, unsigned Alignment) {
597 if(getFreeMemSize() < Size)
598 /* The code size exceeds our limit */
599 return NULL;
600
601 if(Alignment == 0)
602 Alignment = 1;
603
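      /*
       * Function code grows upward from the bottom of the buffer: round the
       * current end of the code region up to the requested alignment and
       * hand it out (the mirror image of the downward, round-down
       * allocation in allocateGSGMemory()).
       */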
604 uint8_t* result = getCodeMemBase() + mCurFuncMemIdx;
605 result = (uint8_t*) (((intptr_t) result + Alignment - 1) &
606 ~(intptr_t) (Alignment - 1)
607 );
608
609 mCurFuncMemIdx = (result + Size) - getCodeMemBase();
610
611 return result;
612 }
613
614 /* allocateGlobal - Allocate memory for a global. */
615 uint8_t* allocateGlobal(uintptr_t Size, unsigned Alignment) {
616 return allocateGSGMemory(Size, Alignment);
617 }
618
619 /*
620 * deallocateFunctionBody - Free the specified function body. The argument
621 * must be the return value from a call to startFunctionBody() that hasn't
622 * been deallocated yet. This is never called when the JIT is currently
623 * emitting a function.
624 */
625 void deallocateFunctionBody(void *Body) {
626 /* linear search */
627 FunctionMapTy::iterator I;
628 for(I = mFunctionMap.begin();
629 I != mFunctionMap.end();
630 I++)
631 if(I->second.first == Body)
632 break;
633
634 assert(I != mFunctionMap.end() && "Memory was never allocated!");
635
636 /* free the memory */
637 uint8_t* FunctionStart = (uint8_t*) I->second.first;
638 uint8_t* FunctionEnd = (uint8_t*) I->second.second;
639 intptr_t SizeNeedMove = (getCodeMemBase() + mCurFuncMemIdx) - FunctionEnd;
640
641 assert(SizeNeedMove >= 0 &&
642 "Internal error: CodeMemoryManager::mCurFuncMemIdx may not"
643 " be correctly calculated!");
644
645 if(SizeNeedMove > 0)
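      /*
       * Compact the function region: slide any code emitted after the freed
       * function down over the freed bytes so the region stays contiguous,
       * then shrink mCurFuncMemIdx by the freed size.
       */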
646 /* there's data behind deallocating function */
647 ::memmove(FunctionStart, FunctionEnd, SizeNeedMove);
648 mCurFuncMemIdx -= (FunctionEnd - FunctionStart);
649
650 return;
651 }
652
653 /*
654 * startExceptionTable - When we finished JITing the function, if exception
655 * handling is set, we emit the exception table.
656 */
657 uint8_t* startExceptionTable(const llvm::Function* F, uintptr_t &ActualSize)
658 {
659 assert(false && "Exceptions are not allowed in our language specification");
660 return NULL;
661 }
662
663 /*
664 * endExceptionTable - This method is called when the JIT is done emitting
665 * the exception table.
666 */
667 void endExceptionTable(const llvm::Function *F, uint8_t *TableStart,
668 uint8_t *TableEnd, uint8_t* FrameRegister) {
669 assert(false && "Exceptions are not allowed in our language specification");
670 return;
671 }
672
673 /*
674 * deallocateExceptionTable - Free the specified exception table's memory.
675 * The argument must be the return value from a call to
676 * startExceptionTable()
677 * that hasn't been deallocated yet. This is never called when the JIT is
678 * currently emitting an exception table.
679 */
680 void deallocateExceptionTable(void *ET) {
681 assert(false && "Exceptions are not allowed in our language specification");
682 return;
683 }
684
685 /* Below are the methods we create */
686 void reset() {
687 mpGOTBase = NULL;
688 HasGOT = false;
689
690 mCurFuncMemIdx = 0;
691 mCurGSGMemIdx = MaxCodeSize - 1;
692
693 mFunctionMap.clear();
694
695 return;
696 }
697
698 ~CodeMemoryManager() {
699 if(mpCodeMem != NULL)
700 llvm::sys::Memory::ReleaseRWX(*mpCodeMem);
701 return;
702 }
703 /* }}} */
704 }; /* End of class CodeMemoryManager */
705
706 /* The memory manager for code emitter */
707 llvm::OwningPtr<CodeMemoryManager> mCodeMemMgr;
708 CodeMemoryManager* createCodeMemoryManager() {
709 mCodeMemMgr.reset(new CodeMemoryManager());
710 return mCodeMemMgr.get();
711 }
712
713 /* Code emitter */
714 class CodeEmitter : public llvm::JITCodeEmitter {
715 /* {{{ */
716 public:
717 typedef llvm::DenseMap<const llvm::GlobalValue*, void*> GlobalAddressMapTy;
718 typedef GlobalAddressMapTy::const_iterator global_addresses_const_iterator;
719
720 private:
721 CodeMemoryManager* mpMemMgr;
722
723 /* The JITInfo for the target we are compiling to */
724 llvm::TargetJITInfo* mpTJI;
725
726 const llvm::TargetData* mpTD;
727
728 /*
729 * MBBLocations - This vector is a mapping from MBB ID's to their address.
730 * It is filled in by the StartMachineBasicBlock callback and queried by
731 * the getMachineBasicBlockAddress callback.
732 */
733 std::vector<uintptr_t> mMBBLocations;
734
735 /* ConstantPool - The constant pool for the current function. */
736 llvm::MachineConstantPool* mpConstantPool;
737
738 /* ConstantPoolBase - A pointer to the first entry in the constant pool. */
739 void *mpConstantPoolBase;
740
741 /* ConstPoolAddresses - Addresses of individual constant pool entries. */
742 llvm::SmallVector<uintptr_t, 8> mConstPoolAddresses;
743
744 /* JumpTable - The jump tables for the current function. */
745 llvm::MachineJumpTableInfo *mpJumpTable;
746
747 /* JumpTableBase - A pointer to the first entry in the jump table. */
748 void *mpJumpTableBase;
749
750 /*
751 * When outputting a function stub in the context of some other function, we
752 * save BufferBegin/BufferEnd/CurBufferPtr here.
753 */
754 uint8_t *mpSavedBufferBegin, *mpSavedBufferEnd, *mpSavedCurBufferPtr;
755
756 /* Relocations - These are the relocations that the function needs,
757 as emitted. */
758 std::vector<llvm::MachineRelocation> mRelocations;
759
760 /* LabelLocations - This vector is a mapping from Label ID's to their
761 address. */
762 std::vector<uintptr_t> mLabelLocations;
763
764 class EmittedFunctionCode {
765 public:
766 void* FunctionBody; // Beginning of the function's allocation.
767 void* Code; // The address the function's code actually starts at.
768 int Size; // The size of the function code
769
770 EmittedFunctionCode() : FunctionBody(NULL), Code(NULL), Size(0) { return; }
771 };
772 EmittedFunctionCode* mpCurEmitFunction;
773
774 typedef std::map<const std::string, EmittedFunctionCode*
775 > EmittedFunctionsMapTy;
776 EmittedFunctionsMapTy mEmittedFunctions;
777
778 /* MMI - Machine module info for exception information */
779 llvm::MachineModuleInfo* mpMMI;
780
781 GlobalAddressMapTy mGlobalAddressMap;
782
783 /*
784 * UpdateGlobalMapping - Replace an existing mapping for GV with a new
785 * address. This updates both maps as required. If "Addr" is null, the
786 * entry for the global is removed from the mappings.
787 */
788 void* UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
789 if(Addr == NULL) {
790 /* Removing mapping */
791 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
792 void *OldVal;
793
794 if(I == mGlobalAddressMap.end())
795 OldVal = NULL;
796 else {
797 OldVal = I->second;
798 mGlobalAddressMap.erase(I);
799 }
800
801 return OldVal;
802 }
803
804 void*& CurVal = mGlobalAddressMap[GV];
805 void* OldVal = CurVal;
806
807 CurVal = Addr;
808
809 return OldVal;
810 }
811
812 /*
813 * AddGlobalMapping - Tell the execution engine that the specified global is
814 * at the specified location. This is used internally as functions are
815 * JIT'd
816 * and as global variables are laid out in memory.
817 */
818 void AddGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
819 void*& CurVal = mGlobalAddressMap[GV];
820 assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
821 CurVal = Addr;
822 return;
823 }
824
825 /*
826 * GetPointerToGlobalIfAvailable - This returns the address of the specified
827 * global value if it has already been codegen'd,
828 * otherwise it returns null.
829 */
830 void* GetPointerToGlobalIfAvailable(const llvm::GlobalValue* GV) const {
831 GlobalAddressMapTy::const_iterator I = mGlobalAddressMap.find(GV);
832 return ((I != mGlobalAddressMap.end()) ? I->second : NULL);
833 }
834
835 unsigned int GetConstantPoolSizeInBytes(llvm::MachineConstantPool* MCP) {
836 const std::vector<llvm::MachineConstantPoolEntry>& Constants =
837 MCP->getConstants();
838
839 if(Constants.empty())
840 return 0;
841
842 unsigned int Size = 0;
843 for(int i=0;i<Constants.size();i++) {
844 llvm::MachineConstantPoolEntry CPE = Constants[i];
845 unsigned int AlignMask = CPE.getAlignment() - 1;
846 Size = (Size + AlignMask) & ~AlignMask;
847 const llvm::Type* Ty = CPE.getType();
848 Size += mpTD->getTypeAllocSize(Ty);
849 }
850
851 return Size;
852 }
853
854 /*
855 * This function converts a Constant* into a GenericValue. The interesting
856 * part is if C is a ConstantExpr.
857 */
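    /*
     * Illustrative example (not taken from this file): a global initializer
     * such as
     *
     *   int arr[8];
     *   int* p = &arr[4];
     *
     * reaches us as a GetElementPtr ConstantExpr; the case below folds it
     * into Result.PointerVal = (address of arr) + (offset of element 4).
     */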
858 void GetConstantValue(const llvm::Constant *C, llvm::GenericValue& Result) {
859 if(C->getValueID() == llvm::Value::UndefValueVal)
860 return;
861 else if(C->getValueID() == llvm::Value::ConstantExprVal) {
862 const llvm::ConstantExpr* CE = (llvm::ConstantExpr*) C;
863 const llvm::Constant* Op0 = CE->getOperand(0);
864
865 switch(CE->getOpcode()) {
866 case llvm::Instruction::GetElementPtr:
867 {
868 /* Compute the index */
869 llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
870 CE->op_end());
871 uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
872 &Indices[0],
873 Indices.size());
874
875 GetConstantValue(Op0, Result);
876 Result.PointerVal = (char*) Result.PointerVal + Offset;
877
878 return;
879 }
880 break;
881
882 case llvm::Instruction::Trunc:
883 {
884 uint32_t BitWidth =
885 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
886
887 GetConstantValue(Op0, Result);
888 Result.IntVal = Result.IntVal.trunc(BitWidth);
889
890 return;
891 }
892 break;
893
894 case llvm::Instruction::ZExt:
895 {
896 uint32_t BitWidth =
897 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
898
899 GetConstantValue(Op0, Result);
900 Result.IntVal = Result.IntVal.zext(BitWidth);
901
902 return;
903 }
904 break;
905
906 case llvm::Instruction::SExt:
907 {
908 uint32_t BitWidth =
909 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
910
911 GetConstantValue(Op0, Result);
912 Result.IntVal = Result.IntVal.sext(BitWidth);
913
914 return;
915 }
916 break;
917
918
919 case llvm::Instruction::FPTrunc:
920 {
921 /* FIXME long double */
922 GetConstantValue(Op0, Result);
923 Result.FloatVal = float(Result.DoubleVal);
924 return;
925 }
926 break;
927
928
929 case llvm::Instruction::FPExt:
930 {
931 /* FIXME long double */
932 GetConstantValue(Op0, Result);
933 Result.DoubleVal = double(Result.FloatVal);
934 return;
935 }
936 break;
937
938
939 case llvm::Instruction::UIToFP:
940 {
941 GetConstantValue(Op0, Result);
942 if(CE->getType()->isFloatTy())
943 Result.FloatVal = float(Result.IntVal.roundToDouble());
944 else if(CE->getType()->isDoubleTy())
945 Result.DoubleVal = Result.IntVal.roundToDouble();
946 else if(CE->getType()->isX86_FP80Ty()) {
947 const uint64_t zero[] = { 0, 0 };
948 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
949 apf.convertFromAPInt(Result.IntVal,
950 false,
951 llvm::APFloat::rmNearestTiesToEven);
952 Result.IntVal = apf.bitcastToAPInt();
953 }
954 return;
955 }
956 break;
957
958 case llvm::Instruction::SIToFP:
959 {
960 GetConstantValue(Op0, Result);
961 if(CE->getType()->isFloatTy())
962 Result.FloatVal = float(Result.IntVal.signedRoundToDouble());
963 else if(CE->getType()->isDoubleTy())
964 Result.DoubleVal = Result.IntVal.signedRoundToDouble();
965 else if(CE->getType()->isX86_FP80Ty()) {
966 const uint64_t zero[] = { 0, 0 };
967 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
968 apf.convertFromAPInt(Result.IntVal,
969 true,
970 llvm::APFloat::rmNearestTiesToEven);
971 Result.IntVal = apf.bitcastToAPInt();
972 }
973 return;
974 }
975 break;
976
977 /* double->APInt conversion handles sign */
978 case llvm::Instruction::FPToUI:
979 case llvm::Instruction::FPToSI:
980 {
981 uint32_t BitWidth =
982 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
983
984 GetConstantValue(Op0, Result);
985 if(Op0->getType()->isFloatTy())
986 Result.IntVal =
987 llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
988 else if(Op0->getType()->isDoubleTy())
989 Result.IntVal =
990 llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal, BitWidth);
991 else if(Op0->getType()->isX86_FP80Ty()) {
992 llvm::APFloat apf = llvm::APFloat(Result.IntVal);
993 uint64_t v;
994 bool ignored;
995 apf.convertToInteger(&v,
996 BitWidth,
997 CE->getOpcode()
998 == llvm::Instruction::FPToSI,
999 llvm::APFloat::rmTowardZero,
1000 &ignored);
1001 Result.IntVal = v; // endian?
1002 }
1003 return;
1004 }
1005 break;
1006
1007 case llvm::Instruction::PtrToInt:
1008 {
1009 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
1010
1011 GetConstantValue(Op0, Result);
1012 Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
1013 (Result.PointerVal));
1014
1015 return;
1016 }
1017 break;
1018
1019 case llvm::Instruction::IntToPtr:
1020 {
1021 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
1022
1023 GetConstantValue(Op0, Result);
1024 if(PtrWidth != Result.IntVal.getBitWidth())
1025 Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
1026 assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");
1027
1028 Result.PointerVal = llvm::PointerTy
1029 (uintptr_t(Result.IntVal.getZExtValue()));
1030
1031 return;
1032 }
1033 break;
1034
1035 case llvm::Instruction::BitCast:
1036 {
1037 GetConstantValue(Op0, Result);
1038 const llvm::Type* DestTy = CE->getType();
1039
1040 switch(Op0->getType()->getTypeID()) {
1041 case llvm::Type::IntegerTyID:
1042 assert(DestTy->isFloatingPointTy() && "invalid bitcast");
1043 if(DestTy->isFloatTy())
1044 Result.FloatVal = Result.IntVal.bitsToFloat();
1045 else if(DestTy->isDoubleTy())
1046 Result.DoubleVal = Result.IntVal.bitsToDouble();
1047 break;
1048
1049 case llvm::Type::FloatTyID:
1050 assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
1051 Result.IntVal.floatToBits(Result.FloatVal);
1052 break;
1053
1054 case llvm::Type::DoubleTyID:
1055 assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
1056 Result.IntVal.doubleToBits(Result.DoubleVal);
1057 break;
1058
1059 case llvm::Type::PointerTyID:
1060 assert(DestTy->isPointerTy() && "Invalid bitcast");
1061 break; // getConstantValue(Op0) above already converted it
1062
1063 default:
1064 llvm_unreachable("Invalid bitcast operand");
1065 break;
1066 }
1067
1068 return;
1069 }
1070 break;
1071
1072 case llvm::Instruction::Add:
1073 case llvm::Instruction::FAdd:
1074 case llvm::Instruction::Sub:
1075 case llvm::Instruction::FSub:
1076 case llvm::Instruction::Mul:
1077 case llvm::Instruction::FMul:
1078 case llvm::Instruction::UDiv:
1079 case llvm::Instruction::SDiv:
1080 case llvm::Instruction::URem:
1081 case llvm::Instruction::SRem:
1082 case llvm::Instruction::And:
1083 case llvm::Instruction::Or:
1084 case llvm::Instruction::Xor:
1085 {
1086 llvm::GenericValue LHS, RHS;
1087 GetConstantValue(Op0, LHS);
1088 GetConstantValue(CE->getOperand(1), RHS);
1089
1090 switch(Op0->getType()->getTypeID()) {
1091 case llvm::Type::IntegerTyID:
1092 switch (CE->getOpcode()) {
1093 case llvm::Instruction::Add:
1094 Result.IntVal = LHS.IntVal + RHS.IntVal;
1095 break;
1096 case llvm::Instruction::Sub:
1097 Result.IntVal = LHS.IntVal - RHS.IntVal;
1098 break;
1099 case llvm::Instruction::Mul:
1100 Result.IntVal = LHS.IntVal * RHS.IntVal;
1101 break;
1102 case llvm::Instruction::UDiv:
1103 Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
1104 break;
1105 case llvm::Instruction::SDiv:
1106 Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
1107 break;
1108 case llvm::Instruction::URem:
1109 Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
1110 break;
1111 case llvm::Instruction::SRem:
1112 Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
1113 break;
1114 case llvm::Instruction::And:
1115 Result.IntVal = LHS.IntVal & RHS.IntVal;
1116 break;
1117 case llvm::Instruction::Or:
1118 Result.IntVal = LHS.IntVal | RHS.IntVal;
1119 break;
1120 case llvm::Instruction::Xor:
1121 Result.IntVal = LHS.IntVal ^ RHS.IntVal;
1122 break;
1123 default:
1124 llvm_unreachable("Invalid integer opcode");
1125 break;
1126 }
1127 break;
1128
1129 case llvm::Type::FloatTyID:
1130 switch (CE->getOpcode()) {
1131 case llvm::Instruction::FAdd:
1132 Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
1133 break;
1134 case llvm::Instruction::FSub:
1135 Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
1136 break;
1137 case llvm::Instruction::FMul:
1138 Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
1139 break;
1140 case llvm::Instruction::FDiv:
1141 Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
1142 break;
1143 case llvm::Instruction::FRem:
1144 Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
1145 break;
1146 default:
1147 llvm_unreachable("Invalid float opcode");
1148 break;
1149 }
1150 break;
1151
1152 case llvm::Type::DoubleTyID:
1153 switch (CE->getOpcode()) {
1154 case llvm::Instruction::FAdd:
1155 Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
1156 break;
1157 case llvm::Instruction::FSub:
1158 Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
1159 break;
1160 case llvm::Instruction::FMul:
1161 Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
1162 break;
1163 case llvm::Instruction::FDiv:
1164 Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
1165 break;
1166 case llvm::Instruction::FRem:
1167 Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
1168 break;
1169 default:
1170 llvm_unreachable("Invalid double opcode");
1171 break;
1172 }
1173 break;
1174
1175 case llvm::Type::X86_FP80TyID:
1176 case llvm::Type::PPC_FP128TyID:
1177 case llvm::Type::FP128TyID:
1178 {
1179 llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
1180 switch (CE->getOpcode()) {
1181 case llvm::Instruction::FAdd:
1182 apfLHS.add(llvm::APFloat(RHS.IntVal),
1183 llvm::APFloat::rmNearestTiesToEven);
1184 break;
1185 case llvm::Instruction::FSub:
1186 apfLHS.subtract(llvm::APFloat(RHS.IntVal),
1187 llvm::APFloat::rmNearestTiesToEven);
1188 break;
1189 case llvm::Instruction::FMul:
1190 apfLHS.multiply(llvm::APFloat(RHS.IntVal),
1191 llvm::APFloat::rmNearestTiesToEven);
1192 break;
1193 case llvm::Instruction::FDiv:
1194 apfLHS.divide(llvm::APFloat(RHS.IntVal),
1195 llvm::APFloat::rmNearestTiesToEven);
1196 break;
1197 case llvm::Instruction::FRem:
1198 apfLHS.mod(llvm::APFloat(RHS.IntVal),
1199 llvm::APFloat::rmNearestTiesToEven);
1200 break;
1201 default:
1202 llvm_unreachable("Invalid long double opcode");
1203 llvm_unreachable(0);
1204 break;
1205 }
1206
1207 Result.IntVal = apfLHS.bitcastToAPInt();
1208 }
1209 break;
1210
1211 default:
1212 llvm_unreachable("Bad add type!");
1213 break;
1214 } /* End switch(Op0->getType()->getTypeID()) */
1215
1216 return;
1217 }
1218
1219 default:
1220 break;
1221 } /* End switch(CE->getOpcode()) */
1222
1223 std::string msg;
1224 llvm::raw_string_ostream Msg(msg);
1225 Msg << "ConstantExpr not handled: " << *CE;
1226 llvm::llvm_report_error(Msg.str());
1227 } /* C->getValueID() == llvm::Value::ConstantExprVal */
1228
1229 switch (C->getType()->getTypeID()) {
1230 case llvm::Type::FloatTyID:
1231 Result.FloatVal = llvm::cast<llvm::ConstantFP>(C)
1232 ->getValueAPF().convertToFloat();
1233 break;
1234
1235 case llvm::Type::DoubleTyID:
1236 Result.DoubleVal = llvm::cast<llvm::ConstantFP>(C)
1237 ->getValueAPF().convertToDouble();
1238 break;
1239
1240 case llvm::Type::X86_FP80TyID:
1241 case llvm::Type::FP128TyID:
1242 case llvm::Type::PPC_FP128TyID:
1243 Result.IntVal = llvm::cast <llvm::ConstantFP>(C)
1244 ->getValueAPF().bitcastToAPInt();
1245 break;
1246
1247 case llvm::Type::IntegerTyID:
1248 Result.IntVal = llvm::cast<llvm::ConstantInt>(C)
1249 ->getValue();
1250 break;
1251
1252 case llvm::Type::PointerTyID:
1253 switch(C->getValueID()) {
1254 case llvm::Value::ConstantPointerNullVal:
1255 Result.PointerVal = NULL;
1256 break;
1257
1258 case llvm::Value::FunctionVal:
1259 {
1260 const llvm::Function* F = (llvm::Function*) C;
1261 Result.PointerVal = GetPointerToFunctionOrStub
1262 (const_cast<llvm::Function*>(F)
1263 );
1264 }
1265 break;
1266
1267 case llvm::Value::GlobalVariableVal:
1268 {
1269 const llvm::GlobalVariable* GV = (llvm::GlobalVariable*) C;
1270 Result.PointerVal = GetOrEmitGlobalVariable
1271 (const_cast<llvm::GlobalVariable*>(GV)
1272 );
1273 }
1274 break;
1275
1276 case llvm::Value::BlockAddressVal:
1277 {
1278 // const llvm::BlockAddress* BA = (llvm::BlockAddress*) C;
1279 // Result.PointerVal = getPointerToBasicBlock
1280 // (const_cast<llvm::BasicBlock*>(BA->getBasicBlock()));
1281 assert(false && "JIT does not support address-of-label yet!");
1282 }
1283 break;
1284
1285 default:
1286 llvm_unreachable("Unknown constant pointer type!");
1287 break;
1288 }
1289 break;
1290
1291 default:
1292 std::string msg;
1293 llvm::raw_string_ostream Msg(msg);
1294 Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
1295 llvm::llvm_report_error(Msg.str());
1296 break;
1297 }
1298
1299 return;
1300 }
1301
1302 /*
1303 * StoreValueToMemory -
1304 * Stores the data in @Val of type @Ty at address @Addr.
1305 */
1306 void StoreValueToMemory(const llvm::GenericValue& Val, void* Addr,
1307 const llvm::Type *Ty) {
1308 const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);
1309
1310 switch(Ty->getTypeID()) {
1311 case llvm::Type::IntegerTyID:
1312 {
1313 const llvm::APInt& IntVal = Val.IntVal;
1314 assert((IntVal.getBitWidth() + 7) / 8 >= StoreBytes &&
1315 "Integer too small!");
1316
1317 uint8_t *Src = (uint8_t*) IntVal.getRawData();
1318
1319 if(llvm::sys::isLittleEndianHost()) {
1320 /*
1321 * Little-endian host - the source is ordered from LSB to MSB.
1322 * Order the destination from LSB to MSB: Do a straight copy.
1323 */
1324 memcpy(Addr, Src, StoreBytes);
1325 } else {
1326 /*
1327 * Big-endian host - the source is an array of 64 bit words
1328 * ordered from LSW to MSW.
1329 *
1330 * Each word is ordered from MSB to LSB.
1331 *
1332 * Order the destination from MSB to LSB:
1333 * Reverse the word order, but not the bytes in a word.
1334 */
1335 unsigned int i = StoreBytes;
1336 while(i > sizeof(uint64_t)) {
1337 i -= sizeof(uint64_t);
1338 memcpy((uint8_t*) Addr + i, Src, sizeof(uint64_t));
1339 Src += sizeof(uint64_t);
1340 }
1341
1342 memcpy(Addr, Src + sizeof(uint64_t) - i, i);
1343 }
1344 }
1345 break;
1346
1347 case llvm::Type::FloatTyID:
1348 {
1349 *((float*) Addr) = Val.FloatVal;
1350 }
1351 break;
1352
1353 case llvm::Type::DoubleTyID:
1354 {
1355 *((double*) Addr) = Val.DoubleVal;
1356 }
1357 break;
1358
1359 case llvm::Type::X86_FP80TyID:
1360 {
1361 memcpy(Addr, Val.IntVal.getRawData(), 10);
1362 }
1363 break;
1364
1365 case llvm::Type::PointerTyID:
1366 {
1367 /*
1368 * Ensure 64 bit target pointers are fully
1369 * initialized on 32 bit hosts.
1370 */
1371 if(StoreBytes != sizeof(llvm::PointerTy))
1372 memset(Addr, 0, StoreBytes);
1373 *((llvm::PointerTy*) Addr) = Val.PointerVal;
1374 }
1375 break;
1376
1377 default:
1378 break;
1379 }
1380
1381 if(llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
1382 std::reverse((uint8_t*) Addr, (uint8_t*) Addr + StoreBytes);
1383
1384 return;
1385 }
1386
1387 /*
1388 * InitializeConstantToMemory -
1389 * Recursive function to apply a @Constant value into the
1390 * specified memory location @Addr.
1391 */
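    /*
     * Illustrative example (not taken from this file): for a global such as
     *
     *   struct { int a; float b[2]; } g = { 1, { 2.0f, 3.0f } };
     *
     * this recurses through the ConstantStruct and ConstantArray cases
     * below, and the scalar leaves (1, 2.0f, 3.0f) are written out by
     * GetConstantValue() + StoreValueToMemory() at the offsets given by the
     * target's StructLayout.
     */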
1392 void InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
1393 switch(C->getValueID()) {
1394 case llvm::Value::UndefValueVal:
1395 // Nothing to do
1396 break;
1397
1398 case llvm::Value::ConstantVectorVal:
1399 {
1400 // dynamic cast may hurt performance
1401 const llvm::ConstantVector* CP = (llvm::ConstantVector*) C;
1402
1403 unsigned int ElementSize = mpTD->getTypeAllocSize
1404 (CP->getType()->getElementType());
1405
1406 for(int i=0;i<CP->getNumOperands();i++)
1407 InitializeConstantToMemory(CP->getOperand(i),
1408 (char*) Addr + i * ElementSize);
1409 }
1410 break;
1411
1412 case llvm::Value::ConstantAggregateZeroVal:
1413 memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
1414 break;
1415
1416 case llvm::Value::ConstantArrayVal:
1417 {
1418 const llvm::ConstantArray* CPA = (llvm::ConstantArray*) C;
1419 unsigned int ElementSize = mpTD->getTypeAllocSize
1420 (CPA->getType()->getElementType());
1421
1422 for(int i=0;i<CPA->getNumOperands();i++)
1423 InitializeConstantToMemory(CPA->getOperand(i),
1424 (char*) Addr + i * ElementSize);
1425 }
1426 break;
1427
1428 case llvm::Value::ConstantStructVal:
1429 {
1430 const llvm::ConstantStruct* CPS = (llvm::ConstantStruct*) C;
1431 const llvm::StructLayout* SL = mpTD->getStructLayout
1432 (llvm::cast<llvm::StructType>(CPS->getType()));
1433
1434 for(int i=0;i<CPS->getNumOperands();i++)
1435 InitializeConstantToMemory(CPS->getOperand(i),
1436 (char*) Addr +
1437 SL->getElementOffset(i));
1438 }
1439 break;
1440
1441 default:
1442 {
1443 if(C->getType()->isFirstClassType()) {
1444 llvm::GenericValue Val;
1445 GetConstantValue(C, Val);
1446 StoreValueToMemory(Val, Addr, C->getType());
1447 } else
1448 llvm_unreachable
1449 ("Unknown constant type to initialize memory with!");
1450 }
1451 break;
1452 }
1453
1454 return;
1455 }
1456
1457 void emitConstantPool(llvm::MachineConstantPool *MCP) {
1458 if(mpTJI->hasCustomConstantPool())
1459 return;
1460
1461 /*
1462 * Constant pool address resolution is handled by the target itself on ARM
1463 * (TargetJITInfo::hasCustomConstantPool() returns true).
1464 */
1465#if !defined(PROVIDE_ARM_CODEGEN)
1466 const std::vector<llvm::MachineConstantPoolEntry>& Constants =
1467 MCP->getConstants();
1468
1469 if(Constants.empty())
1470 return;
1471
1472 unsigned Size = GetConstantPoolSizeInBytes(MCP);
1473 unsigned Align = MCP->getConstantPoolAlignment();
1474
1475 mpConstantPoolBase = allocateSpace(Size, Align);
1476 mpConstantPool = MCP;
1477
1478 if(mpConstantPoolBase == NULL)
1479 return; /* out of memory */
1480
1481 unsigned Offset = 0;
1482 for(int i=0;i<Constants.size();i++) {
1483 llvm::MachineConstantPoolEntry CPE = Constants[i];
1484 unsigned AlignMask = CPE.getAlignment() - 1;
1485 Offset = (Offset + AlignMask) & ~AlignMask;
1486
1487 uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
1488 mConstPoolAddresses.push_back(CAddr);
1489
1490 if(CPE.isMachineConstantPoolEntry())
1491 llvm::llvm_report_error
1492 ("Initialize memory with machine specific constant pool"
1493 " entry has not been implemented!");
1494
1495 InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);
1496
1497 const llvm::Type *Ty = CPE.Val.ConstVal->getType();
1498 Offset += mpTD->getTypeAllocSize(Ty);
1499 }
1500#endif
1501 return;
1502 }
1503
1504 void initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
1505 if(mpTJI->hasCustomJumpTables())
1506 return;
1507
1508 const std::vector<llvm::MachineJumpTableEntry>& JT =
1509 MJTI->getJumpTables();
1510 if(JT.empty())
1511 return;
1512
1513 unsigned NumEntries = 0;
1514 for(int i=0;i<JT.size();i++)
1515 NumEntries += JT[i].MBBs.size();
1516
1517 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
1518
1519 mpJumpTable = MJTI;
1520 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
1521 MJTI->getEntryAlignment(*mpTD));
1522
1523 return;
1524 }
1525
1526 void emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
1527 if(mpTJI->hasCustomJumpTables())
1528 return;
1529
1530 const std::vector<llvm::MachineJumpTableEntry>& JT =
1531 MJTI->getJumpTables();
1532 if(JT.empty() || mpJumpTableBase == 0)
1533 return;
1534
1535 assert((llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static)
1536 && "Cross JIT'ing?");
1537 assert(MJTI->getEntrySize(*mpTD) == sizeof(void*) && "Cross JIT'ing?");
1538
1539 /*
1540 * For each jump table, map each target in the jump table to the
1541 * address of an emitted MachineBasicBlock.
1542 */
1543 intptr_t *SlotPtr = (intptr_t*) mpJumpTableBase;
1544 for(int i=0;i<JT.size();i++) {
1545 const std::vector<llvm::MachineBasicBlock*>& MBBs = JT[i].MBBs;
1546 /*
1547 * Store the address of the basic block for this jump table slot in the
1548 * memory we allocated for the jump table in 'initJumpTableInfo'
1549 */
1550 for(int j=0;j<MBBs.size();j++)
1551 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
1552 }
1553 }
1554
1555 void* GetPointerToGlobal(llvm::GlobalValue* V, void* Reference,
1556 bool MayNeedFarStub) {
1557 switch(V->getValueID()) {
1558 case llvm::Value::FunctionVal:
1559 {
1560 llvm::Function* F = (llvm::Function*) V;
1561
1562 /* If we have code, go ahead and return that. */
1563 if(void* ResultPtr = GetPointerToGlobalIfAvailable(F))
1564 return ResultPtr;
1565
1566 if(void* FnStub = GetLazyFunctionStubIfAvailable(F))
1567 /*
1568 * Return the function stub if it's already created.
1569 * We do this first so that:
1570 * we're returning the same address for the function
1571 * as any previous call.
1572 *
1573 * TODO: Yes, this is wrong. The lazy stub isn't guaranteed
1574 * to be close enough to call.
1575 */
1576 return FnStub;
1577
1578 /*
1579 * If we know the target can handle arbitrary-distance calls, try to
1580 * return a direct pointer.
1581 */
1582 if(!MayNeedFarStub) {
1583 /*
1584 * The x86_64 architecture may encounter the bug
1585 * http://hlvm.llvm.org/bugs/show_bug.cgi?id=5201
1586 * which generates the instruction "call" instead of "callq".
1587 *
1588 * And once the real address of the stub cannot be encoded in
1589 * 32 bits, the replacement will be truncated to 32 bits,
1590 * resulting in a serious problem.
1591 */
1592#if !defined(__x86_64__)
1593 /*
1594 * If this is an external function pointer,
1595 * we can force the JIT to
1596 * 'compile' it, which really just adds it to the map.
1597 */
1598 if(F->isDeclaration() || F->hasAvailableExternallyLinkage())
1599 return GetPointerToFunction(F, /* AbortOnFailure */true);
1600#endif
1601 }
1602
1603 /*
1604 * Otherwise, we may need to emit a stub, and, conservatively, we
1605 * always do so.
1606 */
1607 return GetLazyFunctionStub(F);
1608 }
1609 break;
1610
1611 case llvm::Value::GlobalVariableVal:
1612 return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
1613 break;
1614
1615 case llvm::Value::GlobalAliasVal:
1616 {
1617 llvm::GlobalAlias* GA = (llvm::GlobalAlias*) V;
1618 const llvm::GlobalValue* GV = GA->resolveAliasedGlobal(false);
1619
1620 switch(GV->getValueID()) {
1621 case llvm::Value::FunctionVal:
1622 /* FIXME: is there any possibility that the function
1623 is not code-gen'd? */
1624 return GetPointerToFunction(
1625 const_cast<llvm::Function*>((const llvm::Function*) GV),
1626 /* AbortOnFailure */true
1627 );
1628 break;
1629
1630 case llvm::Value::GlobalVariableVal:
1631 {
1632 if(void* p = mGlobalAddressMap[GV])
1633 return p;
1634
1635 llvm::GlobalVariable* GVar = (llvm::GlobalVariable*) GV;
1636 EmitGlobalVariable(GVar);
1637
1638 return mGlobalAddressMap[GV];
1639 }
1640 break;
1641
1642 case llvm::Value::GlobalAliasVal:
1643 assert(false && "Alias should be resolved ultimately!");
1644 break;
1645 }
1646 }
1647 break;
1648
1649 default:
1650 break;
1651 }
1652
1653 llvm_unreachable("Unknown type of global value!");
1654
1655 }
1656
1657 /*
1658 * GetPointerToFunctionOrStub - If the specified function has been
1659 * code-gen'd, return a pointer to the function.
1660 * If not, compile it, or use
1661 * a stub to implement lazy compilation if available.
1662 */
1663 void* GetPointerToFunctionOrStub(llvm::Function* F) {
1664 /*
1665 * If we have already code generated the function,
1666 * just return the address.
1667 */
1668 if(void* Addr = GetPointerToGlobalIfAvailable(F))
1669 return Addr;
1670
1671 /* Get a stub if the target supports it. */
1672 return GetLazyFunctionStub(F);
1673 }
1674
1675 typedef llvm::DenseMap<llvm::Function*, void*> FunctionToLazyStubMapTy;
1676 FunctionToLazyStubMapTy mFunctionToLazyStubMap;
1677
1678 void* GetLazyFunctionStubIfAvailable(llvm::Function* F) {
1679 return mFunctionToLazyStubMap.lookup(F);
1680 }
1681
1682 std::set<llvm::Function*> PendingFunctions;
1683 void* GetLazyFunctionStub(llvm::Function* F) {
1684 /* If we already have a lazy stub for this function, recycle it. */
1685 void*& Stub = mFunctionToLazyStubMap[F];
1686 if(Stub)
1687 return Stub;
1688
1689 /*
1690 * In any case, we should NOT resolve functions at runtime
1691 * (though we are able to).
1692 * We resolve them right now.
1693 */
1694 void* Actual = NULL;
1695 if(F->isDeclaration() || F->hasAvailableExternallyLinkage())
1696 Actual = GetPointerToFunction(F, /* AbortOnFailure */true);
1697
1698 /*
1699 * Codegen a new stub, calling the actual address of
1700 * the external function, if it was resolved.
1701 */
1702 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1703 startGVStub(F, SL.Size, SL.Alignment);
1704 Stub = mpTJI->emitFunctionStub(F, Actual, *this);
1705 finishGVStub();
1706
1707 /*
1708 * We really want the address of the stub in the GlobalAddressMap
1709 * for the JIT, not the address of the external function.
1710 */
1711 UpdateGlobalMapping(F, Stub);
1712
1713 if(!Actual)
1714 PendingFunctions.insert(F);
1715 else
1716 Disassembler(F->getNameStr() + " (stub)",
1717 (uint8_t*) Stub, SL.Size, (uintptr_t) Stub);
1718
1719 return Stub;
1720 }
1721
1722 /* Our resolver for undefined symbols */
1723 BCCSymbolLookupFn mpSymbolLookupFn;
1724 void* mpSymbolLookupContext;
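    /*
     * Illustrative sketch of the callback a host would install (the exact
     * BCCSymbolLookupFn signature and the registration entry point are
     * defined in bcc.h, not here; this is written informally):
     *
     *   static void* lookup(void* context, const char* name) {
     *     // e.g. consult a host-side table or dlsym(); return NULL for
     *     // unknown names so resolution can fail cleanly.
     *   }
     *
     * GetPointerToNamedSymbol() below consults the runtime table first and
     * then this callback.
     */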
1725
1726 void* GetPointerToFunction(llvm::Function* F, bool AbortOnFailure) {
1727 void* Addr = GetPointerToGlobalIfAvailable(F);
1728 if(Addr)
1729 return Addr;
1730
1731 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
1732 "Internal error: only external defined function routes here!");
1733
1734 /* Handle the failure resolution by ourselves. */
1735 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
1736 /* AbortOnFailure */ false);
1737
1738 /*
1739 * If we resolved the symbol to a null address (e.g., a weak external),
1740 * return a null pointer and let the application handle it.
1741 */
1742 if(Addr == NULL)
1743 if(AbortOnFailure)
1744 llvm::llvm_report_error
1745 ("Could not resolve external function address: " + F->getName()
1746 );
1747 else
1748 return NULL;
1749
1750 AddGlobalMapping(F, Addr);
1751
1752 return Addr;
1753 }
1754
1755 void* GetPointerToNamedSymbol(const std::string& Name,
1756 bool AbortOnFailure) {
1757 if(void* Addr = FindRuntimeFunction(Name.c_str()))
1758 return Addr;
1759
1760 if(mpSymbolLookupFn)
1761 if(void* Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1762 return Addr;
1763
1764 if(AbortOnFailure)
1765 llvm::llvm_report_error("Program used external symbol '" + Name +
1766 "' which could not be resolved!");
1767
1768 return NULL;
1769 }
1770
1771 /*
1772 * GetOrEmitGlobalVariable - Return the address of the specified global
1773 * variable, possibly emitting it to memory if needed. This is used by the
1774 * Emitter.
1775 */
1776 void* GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1777 void* Ptr = GetPointerToGlobalIfAvailable(GV);
1778 if(Ptr)
1779 return Ptr;
1780
1781 if(GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1782 /* If the global is external, just remember the address. */
1783 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1784 AddGlobalMapping(GV, Ptr);
1785 } else {
1786 /* If the global hasn't been emitted to memory yet,
1787 allocate space and emit it into memory. */
1788 Ptr = GetMemoryForGV(GV);
1789 AddGlobalMapping(GV, Ptr);
1790 EmitGlobalVariable(GV);
1791 }
1792
1793 return Ptr;
1794 }
1795
1796 /*
1797 * GetMemoryForGV - This method abstracts memory allocation of global
1798 * variable so that the JIT can allocate thread local variables depending
1799 * on the target.
1800 */
1801 void* GetMemoryForGV(const llvm::GlobalVariable* GV) {
1802 char* Ptr;
1803
1804 const llvm::Type* GlobalType = GV->getType()->getElementType();
1805 size_t S = mpTD->getTypeAllocSize(GlobalType);
1806 size_t A = mpTD->getPreferredAlignment(GV);
1807
1808 if(GV->isThreadLocal()) {
1809 /*
1810 * We can support TLS by
1811 *
1812 * Ptr = TJI.allocateThreadLocalMemory(S);
1813 *
1814 * But we choose not to.
1815 * (Should we disable this in the front-end (i.e., slang)?)
1816 */
1817 llvm::llvm_report_error
1818 ("Compilation of Thread Local Storage (TLS) is disabled!");
1819
1820 } else if(mpTJI->allocateSeparateGVMemory()) {
1821 /*
1822 * On Apple's ARM targets (such as the iPhone),
1823 * global variables should be
1824 * placed in separately allocated heap memory rather than in the same
1825 * code memory.
1826 * The question is, what about Android?
1827 */
1828 if(A <= 8) {
1829 Ptr = (char*) malloc(S);
1830 } else {
1831 /*
1832 * Allocate (S + A) bytes of memory,
1833 * then use an aligned pointer within that space.
1834 */
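        /*
         * For example (illustrative numbers): with S == 100 and A == 16 we
         * malloc 116 bytes; if the returned pointer is misaligned by 4, it
         * is bumped by 12 so the global starts on a 16-byte boundary. The
         * original malloc pointer is not recorded here.
         */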
1835 Ptr = (char*) malloc(S + A);
1836 unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
1837 Ptr = Ptr + (MisAligned ? (A - MisAligned) : 0);
1838 }
1839 } else {
1840 Ptr = (char*) allocateGlobal(S, A);
1841 }
1842
1843 return Ptr;
1844 }
1845
1846 void EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1847 void* GA = GetPointerToGlobalIfAvailable(GV);
1848
1849 if(GV->isThreadLocal())
1850 llvm::llvm_report_error("We don't support Thread Local Storage (TLS)!");
1851
1852 if(GA == NULL) {
1853 /* If it's not already specified, allocate memory for the global. */
1854 GA = GetMemoryForGV(GV);
1855 AddGlobalMapping(GV, GA);
1856 }
1857
1858 InitializeConstantToMemory(GV->getInitializer(), GA);
1859
1860 /* You can do some statistics on global variables here */
1861 return;
1862 }
1863
1864 typedef std::map<llvm::AssertingVH<llvm::GlobalValue>, void*
1865 > GlobalToIndirectSymMapTy;
1866 GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
1867
1868 void* GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1869 /*
1870 * Make sure GV is emitted first, and create a stub containing the fully
1871 * resolved address.
1872 */
1873 void* GVAddress = GetPointerToGlobal(V, Reference, false);
1874
1875 /* If we already have a stub for this global variable, recycle it. */
1876 void*& IndirectSym = GlobalToIndirectSymMap[V];
1877 /* Otherwise, codegen a new indirect symbol. */
1878 if(!IndirectSym)
1879 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1880
1881 return IndirectSym;
1882 }
1883
1884 /*
1885 * ExternalFnToStubMap - This is the equivalent of FunctionToLazyStubMap
1886 * for external functions.
1887 *
1888 * TODO: Of course, external functions don't need a lazy stub.
1889 * It's actually
1890 * here to make it more likely that far calls succeed, but no single
1891 * stub can guarantee that. I'll remove this in a subsequent checkin
1892 * when I actually fix far calls. (comment from LLVM source)
1893 */
1894 std::map<void*, void*> ExternalFnToStubMap;
1895
1896 /*
1897 * GetExternalFunctionStub - Return a stub for the function at the
1898 * specified address.
1899 */
1900 void* GetExternalFunctionStub(void* FnAddr) {
1901 void*& Stub = ExternalFnToStubMap[FnAddr];
1902 if(Stub)
1903 return Stub;
1904
1905 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1906 startGVStub(0, SL.Size, SL.Alignment);
1907 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1908 finishGVStub();
1909
1910 return Stub;
1911 }
1912
1913
1914 void Disassembler(const std::string& Name, uint8_t* Start,
1915 size_t Length, uintptr_t PC) {
1916#if defined(USE_DISASSEMBLER)
1917 FILE* out = stdout;
1918
1919 fprintf(out, "JIT: Disassembled code: %s\n", Name.c_str());
1920
1921 disassemble_info disasm_info;
1922 int (*print_insn)(bfd_vma pc, disassemble_info *info);
1923
1924 INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
1925
1926 disasm_info.buffer = Start;
1927 disasm_info.buffer_vma = (bfd_vma) (uintptr_t) Start;
1928 disasm_info.buffer_length = Length;
1929 disasm_info.endian = BFD_ENDIAN_LITTLE;
1930
1931#if defined(DEFAULT_X86_CODEGEN)
1932 disasm_info.mach = bfd_mach_i386_i386;
1933 print_insn = print_insn_i386;
1934#elif defined(DEFAULT_ARM_CODEGEN)
1935 print_insn = print_insn_arm;
1936#elif defined(DEFAULT_X64_CODEGEN)
1937 disasm_info.mach = bfd_mach_x86_64;
1938 print_insn = print_insn_i386;
1939#else
1940#error "Unknown target for disassembler"
1941#endif
1942
1943#if defined(DEFAULT_X64_CODEGEN)
1944# define TARGET_FMT_lx "%llx"
1945#else
1946# define TARGET_FMT_lx "%08x"
1947#endif
1948 int Count;
1949 for( ; Length > 0; PC += Count, Length -= Count) {
1950 fprintf(out, "\t0x" TARGET_FMT_lx ": ", (bfd_vma) PC);
1951 Count = print_insn(PC, &disasm_info);
1952 fprintf(out, "\n");
1953 }
1954
1955 fprintf(out, "\n");
1956#undef TARGET_FMT_lx
1957
1958#endif /* USE_DISASSEMBLER */
1959 return;
1960 }
1961
1962 public:
1963 /* Will take the ownership of @MemMgr */
1964 CodeEmitter(CodeMemoryManager* pMemMgr) :
1965 mpMemMgr(pMemMgr),
1966 mpTJI(NULL),
1967 mpTD(NULL),
1968 mpCurEmitFunction(NULL),
1969 mpConstantPool(NULL),
1970 mpJumpTable(NULL),
1971 mpMMI(NULL),
1972 mpSymbolLookupFn(NULL),
1973 mpSymbolLookupContext(NULL)
1974 {
1975 return;
1976 }
1977
1978 inline global_addresses_const_iterator global_address_begin() const {
1979 return mGlobalAddressMap.begin();
1980 }
1981 inline global_addresses_const_iterator global_address_end() const {
1982 return mGlobalAddressMap.end();
1983 }
1984
1985 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
1986 mpSymbolLookupFn = pFn;
1987 mpSymbolLookupContext = pContext;
1988 return;
1989 }
1990
1991 void setTargetMachine(llvm::TargetMachine& TM) {
1992 /* set TargetJITInfo */
1993 mpTJI = TM.getJITInfo();
1994 /* set TargetData */
1995 mpTD = TM.getTargetData();
1996
1997 /*
1998 if(mpTJI->needsGOT())
1999 mpMemMgr->AllocateGOT(); // however,
2000 // both X86 and ARM target don't need GOT
2001 // (mpTJI->needsGOT() always returns false)
2002 */
2003 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
2004      assert(!mpTJI->needsGOT() && "Targets that require a GOT are not supported!");
2005 return;
2006 }
2007
2008 /*
2009 * startFunction - This callback is invoked when the specified function is
2010 * about to be code generated. This initializes the BufferBegin/End/Ptr
2011 * fields.
2012 */
2013 void startFunction(llvm::MachineFunction &F) {
2014 uintptr_t ActualSize = 0;
2015
2016 mpMemMgr->setMemoryWritable();
2017
2018 /*
2019 * BufferBegin, BufferEnd and CurBufferPtr
2020 * are all inherited from class MachineCodeEmitter,
2021 * which is the super class of the class JITCodeEmitter.
2022 *
2023 * BufferBegin/BufferEnd - Pointers to the start and end of the memory
2024 * allocated for this code buffer.
2025 *
2026 * CurBufferPtr - Pointer to the next byte of memory to fill when emitting
2027 * code.
2028      * This is guaranteed to be in the range [BufferBegin,BufferEnd]. If
2029 * this pointer is at BufferEnd, it will never move due to code emission,
2030 * and
2031 * all code emission requests will be ignored (this is
2032 * the buffer overflow condition).
2033 */
2034 BufferBegin = CurBufferPtr = mpMemMgr
2035 ->startFunctionBody(F.getFunction(), ActualSize);
2036 BufferEnd = BufferBegin + ActualSize;
2037
2038 if(mpCurEmitFunction == NULL)
2039 mpCurEmitFunction = new EmittedFunctionCode();
2040 mpCurEmitFunction->FunctionBody = BufferBegin;
2041
2042      /* Ensure the constant pool/jump table info is sufficiently aligned (16 bytes). */
2043 emitAlignment(16);
2044
2045 emitConstantPool(F.getConstantPool());
2046 if(llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
2047 initJumpTableInfo(MJTI);
2048
2049 /* About to start emitting the machine code for the function. */
2050 emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
2051
2052 UpdateGlobalMapping(F.getFunction(), CurBufferPtr);
2053
2054 mpCurEmitFunction->Code = CurBufferPtr;
2055
2056 mMBBLocations.clear();
2057
2058 return;
2059 }
2060
2061 /*
2062 * finishFunction - This callback is invoked
2063 * when the specified function has
2064 * finished code generation.
2065 * If a buffer overflow has occurred, this method
2066 * returns true (the callee is required to try again), otherwise it returns
2067 * false.
2068 */
2069 bool finishFunction(llvm::MachineFunction &F) {
2070 if(CurBufferPtr == BufferEnd) {
2071        /* Not enough memory */
2072 mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
2073 return false;
2074 }
2075
2076 if(llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
2077 emitJumpTableInfo(MJTI);
2078
2079 /*
2080 * FnStart is the start of the text,
2081 * not the start of the constant pool and other per-function data.
2082 */
2083 uint8_t* FnStart = (uint8_t*) GetPointerToGlobalIfAvailable
2084 (F.getFunction());
2085
2086 /* FnEnd is the end of the function's machine code. */
2087 uint8_t* FnEnd = CurBufferPtr;
2088
2089 if(!mRelocations.empty()) {
2090 /* Resolve the relocations to concrete pointers. */
2091        for(size_t i = 0; i < mRelocations.size(); i++) {
2092 llvm::MachineRelocation& MR = mRelocations[i];
2093 void* ResultPtr = NULL;
2094
2095 if(!MR.letTargetResolve()) {
2096 if(MR.isExternalSymbol()) {
2097 ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);
2098 if(MR.mayNeedFarStub())
2099 ResultPtr = GetExternalFunctionStub(ResultPtr);
2100 } else if(MR.isGlobalValue()) {
2101 ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
2102 BufferBegin
2103 + MR.getMachineCodeOffset(),
2104 MR.mayNeedFarStub());
2105 } else if(MR.isIndirectSymbol()) {
2106 ResultPtr = GetPointerToGVIndirectSym
2107 (MR.getGlobalValue(),
2108 BufferBegin + MR.getMachineCodeOffset()
2109 );
2110 } else if(MR.isBasicBlock()) {
2111 ResultPtr =
2112 (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
2113 } else if(MR.isConstantPoolIndex()) {
2114 ResultPtr =
2115 (void*) getConstantPoolEntryAddress
2116 (MR.getConstantPoolIndex());
2117 } else {
2118 assert(MR.isJumpTableIndex() && "Unknown type of relocation");
2119 ResultPtr =
2120 (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
2121 }
2122
2123 MR.setResultPointer(ResultPtr);
2124 }
2125 }
2126
2127 mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
2128 mpMemMgr->getGOTBase());
2129 }
2130
2131 mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
2132 /*
2133 * CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
2134 * global variables that were referenced in the relocations.
2135 */
2136 if(CurBufferPtr == BufferEnd)
2137 return false;
2138
2139 /* Now that we've succeeded in emitting the function */
2140 mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
2141 BufferBegin = CurBufferPtr = 0;
2142
2143 if(F.getFunction()->hasName())
2144 mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
2145 mpCurEmitFunction = NULL;
2146
2147 mRelocations.clear();
2148 mConstPoolAddresses.clear();
2149
2150 /* Mark code region readable and executable if it's not so already. */
2151 mpMemMgr->setMemoryExecutable();
2152
2153 Disassembler(F.getFunction()->getNameStr(),
2154 FnStart, FnEnd - FnStart, (uintptr_t) FnStart);
2155
2156 if(mpMMI)
2157 mpMMI->EndFunction();
2158
2159 return false;
2160 }
2161
2162 void startGVStub(const llvm::GlobalValue* GV, unsigned StubSize,
2163 unsigned Alignment) {
2164 mpSavedBufferBegin = BufferBegin;
2165 mpSavedBufferEnd = BufferEnd;
2166 mpSavedCurBufferPtr = CurBufferPtr;
2167
2168 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
2169 Alignment);
2170 BufferEnd = BufferBegin + StubSize + 1;
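      /*
       * The extra byte keeps a stub that exactly fills StubSize from being
       * mistaken for an overflow by the CurBufferPtr == BufferEnd check
       * (see the assertion in finishGVStub()).
       */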
2171
2172 return;
2173 }
2174
2175 void startGVStub(void* Buffer, unsigned StubSize) {
2176 mpSavedBufferBegin = BufferBegin;
2177 mpSavedBufferEnd = BufferEnd;
2178 mpSavedCurBufferPtr = CurBufferPtr;
2179
2180 BufferBegin = CurBufferPtr = (uint8_t *) Buffer;
2181 BufferEnd = BufferBegin + StubSize + 1;
2182
2183 return;
2184 }
2185
2186 void finishGVStub() {
2187 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
2188
2189 /* restore */
2190 BufferBegin = mpSavedBufferBegin;
2191 BufferEnd = mpSavedBufferEnd;
2192 CurBufferPtr = mpSavedCurBufferPtr;
2193
2194 return;
2195 }
2196
2197 /*
2198 * allocIndirectGV - Allocates and fills storage for an indirect
2199 * GlobalValue, and returns the address.
2200 */
2201 void* allocIndirectGV(const llvm::GlobalValue *GV,
2202 const uint8_t *Buffer, size_t Size,
2203 unsigned Alignment) {
2204 uint8_t* IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
2205 memcpy(IndGV, Buffer, Size);
2206 return IndGV;
2207 }
2208
2209 /* emitLabel - Emits a label */
2210 void emitLabel(uint64_t LabelID) {
2211 if(mLabelLocations.size() <= LabelID)
2212 mLabelLocations.resize((LabelID + 1) * 2);
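        /* Grow geometrically so repeated emitLabel() calls stay cheap. */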
2213 mLabelLocations[LabelID] = getCurrentPCValue();
2214 return;
2215 }
2216
2217 /*
2218 * allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
2219 * this method does not allocate memory in the current output buffer,
2220 * because a global may live longer than the current function.
2221 */
2222 void* allocateGlobal(uintptr_t Size, unsigned Alignment) {
2223 /* Delegate this call through the memory manager. */
2224 return mpMemMgr->allocateGlobal(Size, Alignment);
2225 }
2226
2227 /*
2228 * StartMachineBasicBlock - This should be called by the target when a new
2229 * basic block is about to be emitted. This way the MCE knows where the
2230 * start of the block is, and can implement getMachineBasicBlockAddress.
2231 */
2232 void StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
2233 if(mMBBLocations.size() <= (unsigned) MBB->getNumber())
2234 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
2235 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
2236 return;
2237 }
2238
2239 /*
2240 * addRelocation - Whenever a relocatable address is needed, it should be
2241 * noted with this interface.
2242 */
2243 void addRelocation(const llvm::MachineRelocation &MR) {
2244 mRelocations.push_back(MR);
2245 return;
2246 }
2247
2248 /*
2249 * getConstantPoolEntryAddress - Return the address of the 'Index' entry in
2250 * the constant pool that was last emitted with
2251 * the emitConstantPool method.
2252 */
2253 uintptr_t getConstantPoolEntryAddress(unsigned Index) const {
2254 assert(Index < mpConstantPool->getConstants().size() &&
2255 "Invalid constant pool index!");
2256 return mConstPoolAddresses[Index];
2257 }
2258
2259 /*
2260 * getJumpTableEntryAddress - Return the address of the jump table
2261 * with index
2262 * 'Index' in the function that last called initJumpTableInfo.
2263 */
2264 uintptr_t getJumpTableEntryAddress(unsigned Index) const {
2265 const std::vector<llvm::MachineJumpTableEntry>& JT =
2266 mpJumpTable->getJumpTables();
2267
2268 assert(Index < JT.size() && "Invalid jump table index!");
2269
2270 unsigned int Offset = 0;
2271 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
2272
2273      for(unsigned i = 0; i < Index; i++)
2274 Offset += JT[i].MBBs.size();
2275 Offset *= EntrySize;
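      /*
       * Worked example (illustrative values only): with jump tables of 3 and
       * 5 entries and a 4-byte entry size, Index 1 gives Offset = 3 * 4 = 12,
       * i.e. the second table starts 12 bytes into mpJumpTableBase.
       */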
2276
2277 return (uintptr_t)((char *) mpJumpTableBase + Offset);
2278 }
2279
2280 /*
2281 * getMachineBasicBlockAddress - Return the address of the specified
2282 * MachineBasicBlock, only usable after the label for the MBB has been
2283 * emitted.
2284 */
2285 uintptr_t getMachineBasicBlockAddress(llvm::MachineBasicBlock *MBB) const {
2286 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
2287 mMBBLocations[MBB->getNumber()] && "MBB not emitted!");
2288 return mMBBLocations[MBB->getNumber()];
2289 }
2290
2291 /*
2292 * getLabelAddress - Return the address of the specified LabelID,
2293 * only usable after the LabelID has been emitted.
2294 */
2295 uintptr_t getLabelAddress(uint64_t LabelID) const {
2296 assert(mLabelLocations.size() > (unsigned) LabelID &&
2297 mLabelLocations[LabelID] && "Label not emitted!");
2298 return mLabelLocations[LabelID];
2299 }
2300
2301 /*
2302 * Specifies the MachineModuleInfo object.
2303 * This is used for exception handling
2304 * purposes.
2305 */
2306 void setModuleInfo(llvm::MachineModuleInfo* Info) {
2307 mpMMI = Info;
2308 return;
2309 }
2310
2311 void updateFunctionStub(llvm::Function* F) {
2312 /* Get the empty stub we generated earlier. */
2313 void* Stub;
2314 std::set<llvm::Function*>::iterator I = PendingFunctions.find(F);
2315 if(I != PendingFunctions.end())
2316 Stub = *I;
2317 else
2318 return;
2319
2320 void* Addr = GetPointerToGlobalIfAvailable(F);
2321
2322 assert(Addr != Stub &&
2323 "Function must have non-stub address to be updated.");
2324
2325 /*
2326 * Tell the target jit info to rewrite the stub at the specified address,
2327 * rather than creating a new one.
2328 */
2329 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
2330 startGVStub(Stub, SL.Size);
2331 mpTJI->emitFunctionStub(F, Addr, *this);
2332 finishGVStub();
2333
2334 Disassembler(F->getNameStr() + " (stub)", (uint8_t*) Stub,
2335 SL.Size, (uintptr_t) Stub);
2336
2337 PendingFunctions.erase(I);
2338
2339 return;
2340 }
2341
2342 /*
2343     * Once compilation of a translation unit has finished, call this
2344     * function to reclaim memory that is only needed at compile time
2345     * (and not at runtime).
2346     *
2347     * NOTE: Do not call this function until the code-gen passes for a
2348     * given module have finished.
2349     * Otherwise, the result is undefined and may crash the system!
2350 */
2351 void releaseUnnecessary() {
2352 mMBBLocations.clear();
2353 mLabelLocations.clear();
2354 //sliao mGlobalAddressMap.clear();
2355 mFunctionToLazyStubMap.clear();
2356 GlobalToIndirectSymMap.clear();
2357 ExternalFnToStubMap.clear();
2358 PendingFunctions.clear();
2359
2360 return;
2361 }
2362
2363 void reset() {
2364 releaseUnnecessary();
2365
2366 mpSymbolLookupFn = NULL;
2367 mpSymbolLookupContext = NULL;
2368
2369 mpTJI = NULL;
2370 mpTD = NULL;
2371
2372 for(EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin();
2373 I != mEmittedFunctions.end();
2374 I++)
2375 if(I->second != NULL)
2376 delete I->second;
2377 mEmittedFunctions.clear();
2378
2379 mpMemMgr->reset();
2380
2381 return;
2382 }
2383
2384 void* lookup(const char* Name) {
2385 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(Name);
2386 if(I == mEmittedFunctions.end())
2387 return NULL;
2388 else
2389 return I->second->Code;
2390 }
2391
2392 void getVarNames(llvm::Module *M,
2393 BCCsizei* actualVarCount,
2394 BCCsizei maxVarCount,
2395 void** vars) {
2396 int cnt = 0;
2397 for (llvm::Module::const_global_iterator c = M->global_begin(),
2398 e = M->global_end(); c != e; ++c) {
2399 llvm::GlobalVariable *g = (const_cast<llvm::GlobalVariable*> (&(*c)));
2400 if (!g->hasInternalLinkage()) {
2401 cnt++;
2402 }
2403 }
2404
2405 if (actualVarCount)
2406 *actualVarCount = cnt;
2407 if (cnt > maxVarCount)
2408 cnt = maxVarCount;
2409 if (!vars)
2410 return;
2411
2412 for (llvm::Module::const_global_iterator c = M->global_begin(),
2413 e = M->global_end();
2414 c != e && cnt > 0;
2415 ++c, --cnt) {
2416 llvm::GlobalVariable *g = (const_cast<llvm::GlobalVariable*> (&(*c)));
2417 if (!g->hasInternalLinkage()) {
2418          // GetPointerToGlobalIfAvailable is a CodeEmitter member function
2419 *vars++ = (void*) GetPointerToGlobalIfAvailable(g);
2420 }
2421 }
2422 }
2423
2424 void getFunctionNames(BCCsizei* actualFunctionCount,
2425 BCCsizei maxFunctionCount,
2426 BCCchar** functions) {
2427 int functionCount = mEmittedFunctions.size();
2428
2429 if(actualFunctionCount)
2430 *actualFunctionCount = functionCount;
2431 if(functionCount > maxFunctionCount)
2432 functionCount = maxFunctionCount;
2433 if(functions)
2434 for(EmittedFunctionsMapTy::const_iterator it =
2435 mEmittedFunctions.begin();
2436 functionCount > 0;
2437 functionCount--, it++)
2438 *functions++ = (BCCchar*) it->first.c_str();
2439
2440 return;
2441 }
2442
2443 void getFunctionBinary(BCCchar* label,
2444 BCCvoid** base,
2445 BCCsizei* length) {
2446 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
2447 if(I == mEmittedFunctions.end()) {
2448 *base = NULL;
2449 *length = 0;
2450 } else {
2451 *base = I->second->Code;
2452 *length = I->second->Size;
2453 }
2454 return;
2455 }
2456
2457 ~CodeEmitter() {
2458 if(mpMemMgr)
2459 delete mpMemMgr;
2460 return;
2461 }
2462 /* }}} */
2463 }; /* End of Class CodeEmitter */
2464
2465 /* The CodeEmitter */
2466 llvm::OwningPtr<CodeEmitter> mCodeEmitter;
2467 CodeEmitter* createCodeEmitter() {
2468 mCodeEmitter.reset(new CodeEmitter(mCodeMemMgr.take()));
2469 return mCodeEmitter.get();
2470 }
2471
2472 BCCSymbolLookupFn mpSymbolLookupFn;
2473 void* mpSymbolLookupContext;
2474
2475 llvm::Module* mModule;
2476
2477 bool mTypeInformationPrepared;
2478 std::vector<const llvm::Type*> mTypes;
2479
2480 typedef llvm::StringMap<void*> GlobalVarAddresseTy;
2481 GlobalVarAddresseTy mGlobalVarAddresses;
2482
2483 public:
2484 Compiler() : mpSymbolLookupFn(NULL), mpSymbolLookupContext(NULL), mModule(NULL) {
2485 llvm::llvm_install_error_handler(LLVMErrorHandler, &mError);
2486 return;
2487 }
2488
2489 /* interface for BCCscript::registerSymbolCallback() */
2490 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
2491 mpSymbolLookupFn = pFn;
2492 mpSymbolLookupContext = pContext;
2493 return;
2494 }
2495
2496 int loadModule(const char* bitcode, size_t bitcodeSize) {
2497 llvm::MemoryBuffer* SB = NULL;
2498
2499 if(bitcode == NULL || bitcodeSize <= 0)
2500 return 0;
2501
2502 GlobalInitialization();
2503
2504 /* Package input to object MemoryBuffer */
2505 SB = llvm::MemoryBuffer::getMemBuffer(bitcode, bitcode + bitcodeSize);
2506 if(SB == NULL) {
2507 setError("Error reading input Bitcode into memory");
2508 goto on_bcc_load_module_error;
2509 }
2510
2511 /* Read the input Bitcode as a Module */
2512 mModule = llvm::ParseBitcodeFile(SB, llvm::getGlobalContext(), &mError);
2513
2514on_bcc_load_module_error:
2515 if (SB)
2516 delete SB;
2517
2518 return hasError();
2519 }
2520
2521  /* interface for bccCompileScript() */
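  /*
   * Compilation pipeline (as implemented below): look up the target for the
   * configured Triple, create a TargetMachine, set up the code memory
   * manager and CodeEmitter, then run a FunctionPassManager that emits
   * machine code for every non-declaration function.  Afterwards the
   * global-address map is copied out and any "#pragma" metadata is read.
   */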
2522 int compile() {
2523 llvm::TargetData* TD = NULL;
2524
2525 llvm::TargetMachine* TM = NULL;
2526 const llvm::Target* Target;
2527 std::string FeaturesStr;
2528
2529 llvm::FunctionPassManager* CodeGenPasses = NULL;
2530 const llvm::NamedMDNode* PragmaMetadata;
2531
2532 if(mModule == NULL) /* No module was loaded */
2533 return 0;
2534
2535 /* Create TargetMachine */
2536 Target = llvm::TargetRegistry::lookupTarget(Triple, mError);
2537 if(hasError())
2538 goto on_bcc_compile_error;
2539
2540 if(!CPU.empty() || !Features.empty()) {
2541 llvm::SubtargetFeatures F;
2542 F.setCPU(CPU);
2543 for(std::vector<std::string>::const_iterator it = Features.begin();
2544 it != Features.end();
2545 it++)
2546 F.AddFeature(*it);
2547 FeaturesStr = F.getString();
2548 }
2549
2550 TM = Target->createTargetMachine(Triple, FeaturesStr);
2551 if(TM == NULL) {
2552 setError("Failed to create target machine implementation for the"
2553 " specified triple '" + Triple + "'");
2554 goto on_bcc_compile_error;
2555 }
2556
2557 /* Create memory manager for creation of code emitter later */
2558 if(!mCodeMemMgr.get() && !createCodeMemoryManager()) {
2559 setError("Failed to startup memory management for further compilation");
2560 goto on_bcc_compile_error;
2561 }
2562
2563 /* Create code emitter */
2564 if(!mCodeEmitter.get()) {
2565 if(!createCodeEmitter()) {
2566 setError("Failed to create machine code emitter to complete"
2567 " the compilation");
2568 goto on_bcc_compile_error;
2569 }
2570 } else {
2571 /* reuse the code emitter */
2572 mCodeEmitter->reset();
2573 }
2574
2575 mCodeEmitter->setTargetMachine(*TM);
2576 mCodeEmitter->registerSymbolCallback(mpSymbolLookupFn,
2577 mpSymbolLookupContext);
2578
2579 /* Get target data from Module */
2580 TD = new llvm::TargetData(mModule);
2581 /* Create code-gen pass to run the code emitter */
2582 CodeGenPasses = new llvm::FunctionPassManager(mModule);
2583 CodeGenPasses->add(TD); // Will take the ownership of TD
2584
2585 if(TM->addPassesToEmitMachineCode(*CodeGenPasses,
2586 *mCodeEmitter, CodeGenOptLevel)) {
2587 setError("The machine code emission is not supported by BCC on target '"
2588 + Triple + "'");
2589 goto on_bcc_compile_error;
2590 }
2591
2592 /*
2593 * Run the pass (the code emitter) on every non-declaration function
2594 * in the module
2595 */
2596 CodeGenPasses->doInitialization();
2597 for(llvm::Module::iterator I = mModule->begin();
2598 I != mModule->end();
2599 I++)
2600 if(!I->isDeclaration())
2601 CodeGenPasses->run(*I);
2602
2603 CodeGenPasses->doFinalization();
2604
2605 /* Copy the global address mapping from code emitter and remapping */
2606 for(CodeEmitter::global_addresses_const_iterator I =
2607 mCodeEmitter->global_address_begin();
2608 I != mCodeEmitter->global_address_end();
2609 I++)
2610 {
2611 if(I->first->getValueID() != llvm::Value::GlobalVariableVal)
2612 continue;
2613 llvm::StringRef GlobalVarName = I->first->getName();
2614 GlobalVarAddresseTy::value_type* V =
2615 GlobalVarAddresseTy::value_type::Create(
2616 GlobalVarName.begin(),
2617 GlobalVarName.end(),
2618 mGlobalVarAddresses.getAllocator(),
2619 I->second
2620 );
2621 bool ret = mGlobalVarAddresses.insert(V);
2622 assert(ret && "The global variable name should be unique over the module");
2623 }
2624
2625 /*
2626     * Tell the code emitter that it can now release the memory used
2627     * during code emission, since emission is done.
2628 */
2629 mCodeEmitter->releaseUnnecessary();
2630
2631 /*
2632 * Finally, read pragma information from the metadata node
2633     * of the module, if any.
2634 */
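    /*
     * Each operand of the "#pragma" named metadata node is expected to be an
     * MDNode holding two MDStrings forming a (name, value) pair, e.g.
     * ("version", "1") -- illustrative only; the actual pragmas are whatever
     * slang emitted into the bitcode.
     */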
2635 PragmaMetadata = mModule->getNamedMetadata(PragmaMetadataName);
2636 if(PragmaMetadata)
2637    for(unsigned i = 0; i < PragmaMetadata->getNumOperands(); i++) {
2638 llvm::MDNode* Pragma = PragmaMetadata->getOperand(i);
2639 if(Pragma != NULL &&
2640 Pragma->getNumOperands() == 2 /* should have exactly 2 operands */) {
2641 llvm::Value* PragmaNameMDS = Pragma->getOperand(0);
2642 llvm::Value* PragmaValueMDS = Pragma->getOperand(1);
2643
2644 if((PragmaNameMDS->getValueID() == llvm::Value::MDStringVal) &&
2645 (PragmaValueMDS->getValueID() == llvm::Value::MDStringVal)) {
2646 llvm::StringRef PragmaName =
2647 static_cast<llvm::MDString*>(PragmaNameMDS)->getString();
2648 llvm::StringRef PragmaValue =
2649 static_cast<llvm::MDString*>(PragmaValueMDS)->getString();
2650
2651 mPragmas.push_back( make_pair( std::string(PragmaName.data(),
2652 PragmaName.size()),
2653 std::string(PragmaValue.data(),
2654 PragmaValue.size())
2655 )
2656 );
2657 }
2658 }
2659 }
2660
2661 on_bcc_compile_error:
2662 if (CodeGenPasses) {
2663 delete CodeGenPasses;
2664 } else if (TD) {
2665 delete TD;
2666 }
2667 if (TM)
2668 delete TM;
2669
2670 return hasError();
2671 }
2672
2673 /* interface for bccGetScriptInfoLog() */
2674 char* getErrorMessage() {
2675 return const_cast<char*>(mError.c_str());
2676 }
2677
2678 /* interface for bccGetScriptLabel() */
2679 void* lookup(const char* name) {
2680 void* addr = NULL;
2681 if(mCodeEmitter.get()) {
2682 /* Find function pointer */
2683 addr = mCodeEmitter->lookup(name);
2684 if(addr == NULL) {
2685 /*
2686 * No function labeled with given name.
2687 * Try searching the global variables.
2688 */
2689 GlobalVarAddresseTy::const_iterator I = mGlobalVarAddresses.find(name);
2690 if(I != mGlobalVarAddresses.end())
2691 addr = I->getValue();
2692 }
2693 }
2694 return addr;
2695 }
2696
2697 /* Interface for bccGetPragmas() */
2698 void getPragmas(BCCsizei* actualStringCount,
2699 BCCsizei maxStringCount,
2700 BCCchar** strings) {
2701 int stringCount = mPragmas.size() * 2;
2702
2703 if(actualStringCount)
2704 *actualStringCount = stringCount;
2705 if(stringCount > maxStringCount)
2706 stringCount = maxStringCount;
2707 if(strings)
2708 for(PragmaList::const_iterator it = mPragmas.begin();
2709 stringCount > 0;
2710 stringCount-=2, it++)
2711 {
2712 *strings++ = (BCCchar*) it->first.c_str();
2713 *strings++ = (BCCchar*) it->second.c_str();
2714 }
2715
2716 return;
2717 }
2718
2719 /* Interface for bccGetVars() */
2720 void getVars(BCCsizei* actualVarCount,
2721 BCCsizei maxVarCount,
2722 void** vars) {
2723 if(mCodeEmitter.get())
2724 mCodeEmitter->getVarNames(mModule,
2725 actualVarCount,
2726 maxVarCount,
2727 vars);
2728    else if (actualVarCount)
2729      *actualVarCount = 0;
2730
2731 return;
2732 }
2733
2734 /* Interface for bccGetFunctions() */
2735 void getFunctions(BCCsizei* actualFunctionCount,
2736 BCCsizei maxFunctionCount,
2737 BCCchar** functions) {
2738 if(mCodeEmitter.get())
2739 mCodeEmitter->getFunctionNames(actualFunctionCount,
2740 maxFunctionCount,
2741 functions);
2742    else if (actualFunctionCount)
2743      *actualFunctionCount = 0;
2744
2745 return;
2746 }
2747
2748 /* Interface for bccGetFunctionBinary() */
2749 void getFunctionBinary(BCCchar* function,
2750 BCCvoid** base,
2751 BCCsizei* length) {
2752 if(mCodeEmitter.get()) {
2753 mCodeEmitter->getFunctionBinary(function, base, length);
2754 } else {
2755 *base = NULL;
2756 *length = 0;
2757 }
2758 return;
2759 }
2760
2761 inline const llvm::Module* getModule() const {
2762 return mModule;
2763 }
2764
2765 inline const std::vector<const llvm::Type*>& getTypes() const {
2766 return mTypes;
2767 }
2768
2769 ~Compiler() {
2770 delete mModule;
2771 llvm::llvm_shutdown();
2772 return;
2773 }
2774}; /* End of Class Compiler */
2775
2776bool Compiler::GlobalInitialized = false;
2777
2778/* Code generation optimization level for the compiler */
2779llvm::CodeGenOpt::Level Compiler::CodeGenOptLevel;
2780
2781std::string Compiler::Triple;
2782
2783std::string Compiler::CPU;
2784
2785std::vector<std::string> Compiler::Features;
2786
2787/*
2788 * The name of the metadata node in which the pragmas reside
2789 * (should be kept in sync with slang.cpp)
2790 */
2791const llvm::StringRef Compiler::PragmaMetadataName = "#pragma";
2792
2793struct BCCscript {
2794 /*
2795 * Part I. Compiler
2796 */
2797
2798 Compiler compiler;
2799
2800 void registerSymbolCallback(BCCSymbolLookupFn pFn, BCCvoid* pContext) {
2801 compiler.registerSymbolCallback(pFn, pContext);
2802 }
2803
2804 /*
2805 * Part II. Logistics & Error handling
2806 */
2807
2808 BCCscript() {
2809 bccError = BCC_NO_ERROR;
2810 }
2811
2812 ~BCCscript() {
2813 }
2814
2815 void setError(BCCenum error) {
2816 if (bccError == BCC_NO_ERROR && error != BCC_NO_ERROR) {
2817 bccError = error;
2818 }
2819 }
2820
2821 BCCenum getError() {
2822 BCCenum result = bccError;
2823 bccError = BCC_NO_ERROR;
2824 return result;
2825 }
2826
2827 BCCenum bccError;
2828};
2829
2830
2831extern "C"
2832BCCscript* bccCreateScript()
2833{
2834 return new BCCscript();
2835}
2836
2837extern "C"
2838BCCenum bccGetError( BCCscript* script )
2839{
2840 return script->getError();
2841}
2842
2843extern "C"
2844void bccDeleteScript(BCCscript* script) {
2845 delete script;
2846}
2847
2848extern "C"
2849void bccRegisterSymbolCallback(BCCscript* script,
2850 BCCSymbolLookupFn pFn,
2851 BCCvoid* pContext)
2852{
2853 script->registerSymbolCallback(pFn, pContext);
2854}
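
/*
 * Illustrative sketch of a host-side lookup callback (not part of this file;
 * the exact BCCSymbolLookupFn typedef lives in bcc.h).  The emitter calls it
 * as pFn(pContext, name) and expects the symbol's address, or NULL if the
 * callback cannot resolve the name:
 *
 *   static BCCvoid* lookupSymbol(BCCvoid* pContext, const BCCchar* name) {
 *     if (strcmp(name, "hostFunction") == 0)   // hypothetical host symbol
 *       return (BCCvoid*) &hostFunction;
 *     return NULL;                             // unresolved; bcc may report an error
 *   }
 *
 *   bccRegisterSymbolCallback(script, lookupSymbol, NULL);
 */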
2855
2856extern "C"
2857void bccScriptBitcode(BCCscript* script,
2858 const BCCchar* bitcode,
2859 BCCint size)
2860{
2861 script->compiler.loadModule(bitcode, size);
2862}
2863
2864extern "C"
2865void bccCompileScript(BCCscript* script)
2866{
2867 int result = script->compiler.compile();
2868 if (result)
2869 script->setError(BCC_INVALID_OPERATION);
2870}
2871
2872extern "C"
2873void bccGetScriptInfoLog(BCCscript* script,
2874 BCCsizei maxLength,
2875 BCCsizei* length,
2876 BCCchar* infoLog)
2877{
2878 char* message = script->compiler.getErrorMessage();
2879 int messageLength = strlen(message) + 1;
2880 if (length)
2881 *length = messageLength;
2882
2883 if (infoLog && maxLength > 0) {
2884 int trimmedLength = maxLength < messageLength ? maxLength : messageLength;
2885 memcpy(infoLog, message, trimmedLength);
2886    infoLog[trimmedLength - 1] = 0;  /* terminate within maxLength (avoids off-by-one write) */
2887 }
2888}
2889
2890extern "C"
2891void bccGetScriptLabel(BCCscript* script,
2892 const BCCchar * name,
2893 BCCvoid ** address)
2894{
2895 void* value = script->compiler.lookup(name);
2896 if (value)
2897 *address = value;
2898 else
2899 script->setError(BCC_INVALID_VALUE);
2900}
2901
2902extern "C"
2903void bccGetPragmas(BCCscript* script,
2904 BCCsizei* actualStringCount,
2905 BCCsizei maxStringCount,
2906 BCCchar** strings)
2907{
2908 script->compiler.getPragmas(actualStringCount, maxStringCount, strings);
2909}
2910
2911extern "C"
2912void bccGetVars(BCCscript* script,
2913 BCCsizei* actualVarCount,
2914 BCCsizei maxVarCount,
2915 void** vars)
2916{
2917 script->compiler.getVars(actualVarCount,
2918 maxVarCount,
2919 vars);
2920}
2921
2922extern "C"
2923void bccGetFunctions(BCCscript* script,
2924 BCCsizei* actualFunctionCount,
2925 BCCsizei maxFunctionCount,
2926 BCCchar** functions)
2927{
2928 script->compiler.getFunctions(actualFunctionCount,
2929 maxFunctionCount,
2930 functions);
2931}
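
/*
 * Illustrative two-call usage of the count/array pattern shared by
 * bccGetPragmas/bccGetVars/bccGetFunctions (sketch only):
 *
 *   BCCsizei count = 0;
 *   bccGetFunctions(script, &count, 0, NULL);       // query the count
 *   BCCchar** names = new BCCchar*[count];
 *   bccGetFunctions(script, NULL, count, names);    // fill the names
 */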
2932
2933extern "C"
2934void bccGetFunctionBinary(BCCscript* script,
2935 BCCchar* function,
2936 BCCvoid** base,
2937 BCCsizei* length)
2938{
2939 script->compiler.getFunctionBinary(function, base, length);
2940}
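
/*
 * Typical end-to-end use of this C API (illustrative sketch only; 'bitcode',
 * 'bitcodeSize' and 'lookupSymbol' are assumed to be provided by the host):
 *
 *   BCCscript* script = bccCreateScript();
 *   bccRegisterSymbolCallback(script, lookupSymbol, NULL);   // optional
 *   bccScriptBitcode(script, bitcode, bitcodeSize);
 *   bccCompileScript(script);
 *   if (bccGetError(script) == BCC_NO_ERROR) {
 *     BCCvoid* addr = NULL;
 *     bccGetScriptLabel(script, "root", &addr);   // "root" is a hypothetical entry name
 *     // cast 'addr' to the proper function pointer type before calling it
 *   }
 *   bccDeleteScript(script);
 */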
2941
2942struct BCCtype {
2943 const Compiler* compiler;
2944 const llvm::Type* t;
2945};
2946
2947} /* End of namespace bcc */