blob: 7acedb5b0e2a81a0e6125363bc34555cc1d40e0d [file] [log] [blame]
Shih-wei Liao67d8f372011-01-16 22:48:35 -08001//===-- CodeEmitter.cpp - CodeEmitter Class -------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See external/llvm/LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the CodeEmitter class.
11//
12//===----------------------------------------------------------------------===//
13
14#define LOG_TAG "bcc"
15#include <cutils/log.h>
16
17#include "CodeEmitter.h"
18
19#include "Config.h"
20
21#include "CodeMemoryManager.h"
22#include "Runtime.h"
23#include "ScriptCompiled.h"
24
25#include <bcc/bcc.h>
26#include <bcc/bcc_cache.h>
27#include "bcc_internal.h"
28
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/APInt.h"
31#include "llvm/ADT/DenseMap.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringRef.h"
34
35#include "llvm/CodeGen/MachineBasicBlock.h"
36#include "llvm/CodeGen/MachineConstantPool.h"
37#include "llvm/CodeGen/MachineFunction.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRelocation.h"
40#include "llvm/CodeGen/MachineJumpTableInfo.h"
41#include "llvm/CodeGen/JITCodeEmitter.h"
42
43#include "llvm/ExecutionEngine/GenericValue.h"
44
45#include "llvm/MC/MCAsmInfo.h"
46#include "llvm/MC/MCDisassembler.h"
47#include "llvm/MC/MCInst.h"
48#include "llvm/MC/MCInstPrinter.h"
49
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/raw_ostream.h"
52
53#if USE_DISASSEMBLER
54#include "llvm/Support/MemoryObject.h"
55#endif
56
Logan Chienc4ea07f2011-03-09 17:27:50 +080057#include "llvm/Support/Host.h"
Shih-wei Liao67d8f372011-01-16 22:48:35 -080058
59#include "llvm/Target/TargetData.h"
60#include "llvm/Target/TargetMachine.h"
61#include "llvm/Target/TargetRegistry.h"
62#include "llvm/Target/TargetJITInfo.h"
63
64#include "llvm/Constant.h"
65#include "llvm/Constants.h"
66#include "llvm/DerivedTypes.h"
67#include "llvm/Function.h"
68#include "llvm/GlobalAlias.h"
69#include "llvm/GlobalValue.h"
70#include "llvm/GlobalVariable.h"
71#include "llvm/Instruction.h"
72#include "llvm/Type.h"
73
74#include <algorithm>
75#include <vector>
76#include <set>
77#include <string>
78
79#include <stddef.h>
80
81
namespace {

#if USE_DISASSEMBLER
// Adapts a raw (pointer, length) byte buffer to the llvm::MemoryObject
// interface so the MC disassembler can fetch instruction bytes from the
// code we just emitted.  Neither pointer is owned.
class BufferMemoryObject : public llvm::MemoryObject {
private:
  const uint8_t *mBytes;  // start of the buffer (not owned)
  uint64_t mLength;       // number of valid bytes at mBytes

public:
  BufferMemoryObject(const uint8_t *Bytes, uint64_t Length)
      : mBytes(Bytes), mLength(Length) {
  }

  virtual uint64_t getBase() const { return 0; }
  virtual uint64_t getExtent() const { return mLength; }

  // Reads the byte at @Addr into @Byte; returns 0 on success, -1 if @Addr
  // is out of range.
  virtual int readByte(uint64_t Addr, uint8_t *Byte) const {
    // Valid addresses are [0, mLength).  The previous check used '>',
    // which wrongly accepted Addr == mLength and read one byte past the
    // end of the buffer.
    if (Addr >= getExtent())
      return -1;
    *Byte = mBytes[Addr];
    return 0;
  }
};
#endif

}  // namespace anonymous
108
109
110namespace bcc {
111
// Will take the ownership of @MemMgr.
//
// @result   ScriptCompiled object that receives compilation results
//           (not owned).
// @pMemMgr  code memory manager; this CodeEmitter takes ownership.
//
// All target/codegen state (target machine, TargetJITInfo, TargetData,
// current function, constant pool, jump table, machine module info) starts
// out NULL and is installed later, before emission begins.  The symbol
// lookup callback is likewise unset until registered by the client.
CodeEmitter::CodeEmitter(ScriptCompiled *result, CodeMemoryManager *pMemMgr)
    : mpResult(result),
      mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
#if USE_DISASSEMBLER
      // Disassembler helpers are created lazily; see the destructor.
      mpAsmInfo(NULL),
      mpDisassmbler(NULL),
      mpIP(NULL),
#endif
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}
131
132
133CodeEmitter::~CodeEmitter() {
134#if USE_DISASSEMBLER
135 delete mpAsmInfo;
136 delete mpDisassmbler;
137 delete mpIP;
138#endif
139}
140
141
// Once you finish the compilation on a translation unit, you can call this
// function to recycle the memory (which is used at compilation time and not
// needed for runtime).
//
// NOTE: You should not call this function until the code-gen passes for a
//       given module is done. Otherwise, the result is undefined and may
//       cause the system to crash!
void CodeEmitter::releaseUnnecessary() {
  mMBBLocations.clear();           // machine-basic-block -> emitted address
  mLabelLocations.clear();         // label -> emitted address
  mGlobalAddressMap.clear();       // GlobalValue -> emitted address
  mFunctionToLazyStubMap.clear();  // Function -> lazy stub address
  GlobalToIndirectSymMap.clear();  // GlobalValue -> indirect symbol stub
  ExternalFnToStubMap.clear();     // external function -> stub address
  PendingFunctions.clear();        // stubs still awaiting resolution
}
158
159
// Drops all per-compilation state so this emitter can be reused for another
// script: clears the compile-time lookup tables (via releaseUnnecessary()),
// detaches the result object and the symbol-lookup callback, forgets the
// target interfaces, and resets the underlying code memory manager.
void CodeEmitter::reset() {
  releaseUnnecessary();

  mpResult = NULL;

  mpSymbolLookupFn = NULL;
  mpSymbolLookupContext = NULL;

  mpTJI = NULL;
  mpTD = NULL;

  // Reset last: the memory manager holds the emitted code/data regions.
  mpMemMgr->reset();
}
173
174
175void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
176 if (Addr == NULL) {
177 // Removing mapping
178 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
179 void *OldVal;
180
181 if (I == mGlobalAddressMap.end()) {
182 OldVal = NULL;
183 } else {
184 OldVal = I->second;
185 mGlobalAddressMap.erase(I);
186 }
187
188 return OldVal;
189 }
190
191 void *&CurVal = mGlobalAddressMap[GV];
192 void *OldVal = CurVal;
193
194 CurVal = Addr;
195
196 return OldVal;
197}
198
199
200unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
201 llvm::MachineConstantPool *MCP) {
202 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
203 MCP->getConstants();
204
205 if (Constants.empty())
206 return 0;
207
208 unsigned int Size = 0;
209 for (int i = 0, e = Constants.size(); i != e; i++) {
210 llvm::MachineConstantPoolEntry CPE = Constants[i];
211 unsigned int AlignMask = CPE.getAlignment() - 1;
212 Size = (Size + AlignMask) & ~AlignMask;
213 const llvm::Type *Ty = CPE.getType();
214 Size += mpTD->getTypeAllocSize(Ty);
215 }
216
217 return Size;
218}
219
220// This function converts a Constant* into a GenericValue. The interesting
221// part is if C is a ConstantExpr.
222void CodeEmitter::GetConstantValue(const llvm::Constant *C,
223 llvm::GenericValue &Result) {
224 if (C->getValueID() == llvm::Value::UndefValueVal)
225 return;
226 else if (C->getValueID() == llvm::Value::ConstantExprVal) {
227 const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
228 const llvm::Constant *Op0 = CE->getOperand(0);
229
230 switch (CE->getOpcode()) {
231 case llvm::Instruction::GetElementPtr: {
232 // Compute the index
233 llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
234 CE->op_end());
235 uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
236 &Indices[0],
237 Indices.size());
238
239 GetConstantValue(Op0, Result);
240 Result.PointerVal =
241 static_cast<uint8_t*>(Result.PointerVal) + Offset;
242
243 return;
244 }
245 case llvm::Instruction::Trunc: {
246 uint32_t BitWidth =
247 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
248
249 GetConstantValue(Op0, Result);
250 Result.IntVal = Result.IntVal.trunc(BitWidth);
251
252 return;
253 }
254 case llvm::Instruction::ZExt: {
255 uint32_t BitWidth =
256 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
257
258 GetConstantValue(Op0, Result);
259 Result.IntVal = Result.IntVal.zext(BitWidth);
260
261 return;
262 }
263 case llvm::Instruction::SExt: {
264 uint32_t BitWidth =
265 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
266
267 GetConstantValue(Op0, Result);
268 Result.IntVal = Result.IntVal.sext(BitWidth);
269
270 return;
271 }
272 case llvm::Instruction::FPTrunc: {
273 // TODO(all): fixme: long double
274 GetConstantValue(Op0, Result);
275 Result.FloatVal = static_cast<float>(Result.DoubleVal);
276 return;
277 }
278 case llvm::Instruction::FPExt: {
279 // TODO(all): fixme: long double
280 GetConstantValue(Op0, Result);
281 Result.DoubleVal = static_cast<double>(Result.FloatVal);
282 return;
283 }
284 case llvm::Instruction::UIToFP: {
285 GetConstantValue(Op0, Result);
286 if (CE->getType()->isFloatTy())
287 Result.FloatVal =
288 static_cast<float>(Result.IntVal.roundToDouble());
289 else if (CE->getType()->isDoubleTy())
290 Result.DoubleVal = Result.IntVal.roundToDouble();
291 else if (CE->getType()->isX86_FP80Ty()) {
292 const uint64_t zero[] = { 0, 0 };
293 llvm::APFloat apf(llvm::APInt(80, 2, zero));
294 apf.convertFromAPInt(Result.IntVal,
295 false,
296 llvm::APFloat::rmNearestTiesToEven);
297 Result.IntVal = apf.bitcastToAPInt();
298 }
299 return;
300 }
301 case llvm::Instruction::SIToFP: {
302 GetConstantValue(Op0, Result);
303 if (CE->getType()->isFloatTy())
304 Result.FloatVal =
305 static_cast<float>(Result.IntVal.signedRoundToDouble());
306 else if (CE->getType()->isDoubleTy())
307 Result.DoubleVal = Result.IntVal.signedRoundToDouble();
308 else if (CE->getType()->isX86_FP80Ty()) {
309 const uint64_t zero[] = { 0, 0 };
310 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
311 apf.convertFromAPInt(Result.IntVal,
312 true,
313 llvm::APFloat::rmNearestTiesToEven);
314 Result.IntVal = apf.bitcastToAPInt();
315 }
316 return;
317 }
318 // double->APInt conversion handles sign
319 case llvm::Instruction::FPToUI:
320 case llvm::Instruction::FPToSI: {
321 uint32_t BitWidth =
322 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
323
324 GetConstantValue(Op0, Result);
325 if (Op0->getType()->isFloatTy())
326 Result.IntVal =
327 llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
328 else if (Op0->getType()->isDoubleTy())
329 Result.IntVal =
330 llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
331 BitWidth);
332 else if (Op0->getType()->isX86_FP80Ty()) {
333 llvm::APFloat apf = llvm::APFloat(Result.IntVal);
334 uint64_t V;
335 bool Ignored;
336 apf.convertToInteger(&V,
337 BitWidth,
338 CE->getOpcode() == llvm::Instruction::FPToSI,
339 llvm::APFloat::rmTowardZero,
340 &Ignored);
341 Result.IntVal = V; // endian?
342 }
343 return;
344 }
345 case llvm::Instruction::PtrToInt: {
346 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
347
348 GetConstantValue(Op0, Result);
349 Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
350 (Result.PointerVal));
351
352 return;
353 }
354 case llvm::Instruction::IntToPtr: {
355 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
356
357 GetConstantValue(Op0, Result);
358 if (PtrWidth != Result.IntVal.getBitWidth())
359 Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
360 assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");
361
362 Result.PointerVal =
363 llvm::PointerTy(
364 static_cast<uintptr_t>(Result.IntVal.getZExtValue()));
365
366 return;
367 }
368 case llvm::Instruction::BitCast: {
369 GetConstantValue(Op0, Result);
370 const llvm::Type *DestTy = CE->getType();
371
372 switch (Op0->getType()->getTypeID()) {
373 case llvm::Type::IntegerTyID: {
374 assert(DestTy->isFloatingPointTy() && "invalid bitcast");
375 if (DestTy->isFloatTy())
376 Result.FloatVal = Result.IntVal.bitsToFloat();
377 else if (DestTy->isDoubleTy())
378 Result.DoubleVal = Result.IntVal.bitsToDouble();
379 break;
380 }
381 case llvm::Type::FloatTyID: {
382 assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
383 Result.IntVal.floatToBits(Result.FloatVal);
384 break;
385 }
386 case llvm::Type::DoubleTyID: {
387 assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
388 Result.IntVal.doubleToBits(Result.DoubleVal);
389 break;
390 }
391 case llvm::Type::PointerTyID: {
392 assert(DestTy->isPointerTy() && "Invalid bitcast");
393 break; // getConstantValue(Op0) above already converted it
394 }
395 default: {
396 llvm_unreachable("Invalid bitcast operand");
397 }
398 }
399 return;
400 }
401 case llvm::Instruction::Add:
402 case llvm::Instruction::FAdd:
403 case llvm::Instruction::Sub:
404 case llvm::Instruction::FSub:
405 case llvm::Instruction::Mul:
406 case llvm::Instruction::FMul:
407 case llvm::Instruction::UDiv:
408 case llvm::Instruction::SDiv:
409 case llvm::Instruction::URem:
410 case llvm::Instruction::SRem:
411 case llvm::Instruction::And:
412 case llvm::Instruction::Or:
413 case llvm::Instruction::Xor: {
414 llvm::GenericValue LHS, RHS;
415 GetConstantValue(Op0, LHS);
416 GetConstantValue(CE->getOperand(1), RHS);
417
418 switch (Op0->getType()->getTypeID()) {
419 case llvm::Type::IntegerTyID: {
420 switch (CE->getOpcode()) {
421 case llvm::Instruction::Add: {
422 Result.IntVal = LHS.IntVal + RHS.IntVal;
423 break;
424 }
425 case llvm::Instruction::Sub: {
426 Result.IntVal = LHS.IntVal - RHS.IntVal;
427 break;
428 }
429 case llvm::Instruction::Mul: {
430 Result.IntVal = LHS.IntVal * RHS.IntVal;
431 break;
432 }
433 case llvm::Instruction::UDiv: {
434 Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
435 break;
436 }
437 case llvm::Instruction::SDiv: {
438 Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
439 break;
440 }
441 case llvm::Instruction::URem: {
442 Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
443 break;
444 }
445 case llvm::Instruction::SRem: {
446 Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
447 break;
448 }
449 case llvm::Instruction::And: {
450 Result.IntVal = LHS.IntVal & RHS.IntVal;
451 break;
452 }
453 case llvm::Instruction::Or: {
454 Result.IntVal = LHS.IntVal | RHS.IntVal;
455 break;
456 }
457 case llvm::Instruction::Xor: {
458 Result.IntVal = LHS.IntVal ^ RHS.IntVal;
459 break;
460 }
461 default: {
462 llvm_unreachable("Invalid integer opcode");
463 }
464 }
465 break;
466 }
467 case llvm::Type::FloatTyID: {
468 switch (CE->getOpcode()) {
469 case llvm::Instruction::FAdd: {
470 Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
471 break;
472 }
473 case llvm::Instruction::FSub: {
474 Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
475 break;
476 }
477 case llvm::Instruction::FMul: {
478 Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
479 break;
480 }
481 case llvm::Instruction::FDiv: {
482 Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
483 break;
484 }
485 case llvm::Instruction::FRem: {
486 Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
487 break;
488 }
489 default: {
490 llvm_unreachable("Invalid float opcode");
491 }
492 }
493 break;
494 }
495 case llvm::Type::DoubleTyID: {
496 switch (CE->getOpcode()) {
497 case llvm::Instruction::FAdd: {
498 Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
499 break;
500 }
501 case llvm::Instruction::FSub: {
502 Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
503 break;
504 }
505 case llvm::Instruction::FMul: {
506 Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
507 break;
508 }
509 case llvm::Instruction::FDiv: {
510 Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
511 break;
512 }
513 case llvm::Instruction::FRem: {
514 Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
515 break;
516 }
517 default: {
518 llvm_unreachable("Invalid double opcode");
519 }
520 }
521 break;
522 }
523 case llvm::Type::X86_FP80TyID:
524 case llvm::Type::PPC_FP128TyID:
525 case llvm::Type::FP128TyID: {
526 llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
527 switch (CE->getOpcode()) {
528 case llvm::Instruction::FAdd: {
529 apfLHS.add(llvm::APFloat(RHS.IntVal),
530 llvm::APFloat::rmNearestTiesToEven);
531 break;
532 }
533 case llvm::Instruction::FSub: {
534 apfLHS.subtract(llvm::APFloat(RHS.IntVal),
535 llvm::APFloat::rmNearestTiesToEven);
536 break;
537 }
538 case llvm::Instruction::FMul: {
539 apfLHS.multiply(llvm::APFloat(RHS.IntVal),
540 llvm::APFloat::rmNearestTiesToEven);
541 break;
542 }
543 case llvm::Instruction::FDiv: {
544 apfLHS.divide(llvm::APFloat(RHS.IntVal),
545 llvm::APFloat::rmNearestTiesToEven);
546 break;
547 }
548 case llvm::Instruction::FRem: {
549 apfLHS.mod(llvm::APFloat(RHS.IntVal),
550 llvm::APFloat::rmNearestTiesToEven);
551 break;
552 }
553 default: {
554 llvm_unreachable("Invalid long double opcode");
555 }
556 }
557 Result.IntVal = apfLHS.bitcastToAPInt();
558 break;
559 }
560 default: {
561 llvm_unreachable("Bad add type!");
562 }
563 } // End switch (Op0->getType()->getTypeID())
564 return;
565 }
566 default: {
567 break;
568 }
569 } // End switch (CE->getOpcode())
570
571 std::string msg;
572 llvm::raw_string_ostream Msg(msg);
573 Msg << "ConstantExpr not handled: " << *CE;
574 llvm::report_fatal_error(Msg.str());
575 } // C->getValueID() == llvm::Value::ConstantExprVal
576
577 switch (C->getType()->getTypeID()) {
578 case llvm::Type::FloatTyID: {
579 Result.FloatVal =
580 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
581 break;
582 }
583 case llvm::Type::DoubleTyID: {
584 Result.DoubleVal =
585 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
586 break;
587 }
588 case llvm::Type::X86_FP80TyID:
589 case llvm::Type::FP128TyID:
590 case llvm::Type::PPC_FP128TyID: {
591 Result.IntVal =
592 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
593 break;
594 }
595 case llvm::Type::IntegerTyID: {
596 Result.IntVal =
597 llvm::cast<llvm::ConstantInt>(C)->getValue();
598 break;
599 }
600 case llvm::Type::PointerTyID: {
601 switch (C->getValueID()) {
602 case llvm::Value::ConstantPointerNullVal: {
603 Result.PointerVal = NULL;
604 break;
605 }
606 case llvm::Value::FunctionVal: {
607 const llvm::Function *F = static_cast<const llvm::Function*>(C);
608 Result.PointerVal =
609 GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
610 break;
611 }
612 case llvm::Value::GlobalVariableVal: {
613 const llvm::GlobalVariable *GV =
614 static_cast<const llvm::GlobalVariable*>(C);
615 Result.PointerVal =
616 GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
617 break;
618 }
619 case llvm::Value::BlockAddressVal: {
620 assert(false && "JIT does not support address-of-label yet!");
621 }
622 default: {
623 llvm_unreachable("Unknown constant pointer type!");
624 }
625 }
626 break;
627 }
628 default: {
629 std::string msg;
630 llvm::raw_string_ostream Msg(msg);
631 Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
632 llvm::report_fatal_error(Msg.str());
633 break;
634 }
635 }
636 return;
637}
638
639
// Stores the data in @Val of type @Ty at address @Addr.
//
// Exactly getTypeStoreSize(@Ty) bytes are written.  Bytes are first laid
// out in host order; if host and target endianness differ, the whole span
// is byte-reversed at the end.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      // APInt stores its value as an array of host-endian uint64_t words,
      // least-significant word first.
      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        //   Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // Copy the remaining (partial) leading word.
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // x87 long double: only the 10 significant bytes of the bit pattern
      // (held in IntVal) are stored.
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      // NOTE(review): unknown type IDs are silently ignored (nothing is
      // written); confirm callers can never reach here with an
      // unsupported first-class type.
      break;
    }
  }

  // Fix up endianness if the host byte order differs from the target's.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}
710
711
// Recursive function to apply a @Constant value into the specified memory
// location @Addr.
//
// Aggregates (vectors, arrays, structs) recurse element by element using
// the target data layout for element sizes/offsets; first-class scalars
// are folded with GetConstantValue() and written via StoreValueToMemory().
void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
  switch (C->getValueID()) {
    case llvm::Value::UndefValueVal: {
      // Nothing to do
      break;
    }
    case llvm::Value::ConstantVectorVal: {
      // dynamic cast may hurt performance
      const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;

      // Elements are laid out contiguously at their alloc size.
      unsigned int ElementSize = mpTD->getTypeAllocSize
          (CP->getType()->getElementType());

      for (int i = 0, e = CP->getNumOperands(); i != e;i++)
        InitializeConstantToMemory(
            CP->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantAggregateZeroVal: {
      // Zero initializer: one memset covers the whole aggregate.
      memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
      break;
    }
    case llvm::Value::ConstantArrayVal: {
      const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
      unsigned int ElementSize = mpTD->getTypeAllocSize
          (CPA->getType()->getElementType());

      for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPA->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantStructVal: {
      const llvm::ConstantStruct *CPS =
          static_cast<const llvm::ConstantStruct*>(C);
      // Struct fields are placed at the offsets computed by the target's
      // struct layout (accounts for padding).
      const llvm::StructLayout *SL = mpTD->getStructLayout
          (llvm::cast<llvm::StructType>(CPS->getType()));

      for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPS->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
      break;
    }
    default: {
      if (C->getType()->isFirstClassType()) {
        // Scalar leaf: fold to a GenericValue, then store it.
        llvm::GenericValue Val;
        GetConstantValue(C, Val);
        StoreValueToMemory(Val, Addr, C->getType());
      } else {
        llvm_unreachable("Unknown constant type to initialize memory "
                         "with!");
      }
      break;
    }
  }
  return;
}
774
775
776void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
777 if (mpTJI->hasCustomConstantPool())
778 return;
779
780 // Constant pool address resolution is handled by the target itself in ARM
781 // (TargetJITInfo::hasCustomConstantPool() returns true).
782#if !defined(PROVIDE_ARM_CODEGEN)
783 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
784 MCP->getConstants();
785
786 if (Constants.empty())
787 return;
788
789 unsigned Size = GetConstantPoolSizeInBytes(MCP);
790 unsigned Align = MCP->getConstantPoolAlignment();
791
792 mpConstantPoolBase = allocateSpace(Size, Align);
793 mpConstantPool = MCP;
794
795 if (mpConstantPoolBase == NULL)
796 return; // out of memory
797
798 unsigned Offset = 0;
799 for (int i = 0, e = Constants.size(); i != e; i++) {
800 llvm::MachineConstantPoolEntry CPE = Constants[i];
801 unsigned AlignMask = CPE.getAlignment() - 1;
802 Offset = (Offset + AlignMask) & ~AlignMask;
803
804 uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
805 mConstPoolAddresses.push_back(CAddr);
806
807 if (CPE.isMachineConstantPoolEntry())
808 llvm::report_fatal_error
809 ("Initialize memory with machine specific constant pool"
810 " entry has not been implemented!");
811
812 InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);
813
814 const llvm::Type *Ty = CPE.Val.ConstVal->getType();
815 Offset += mpTD->getTypeAllocSize(Ty);
816 }
817#endif
818 return;
819}
820
821
822void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
823 if (mpTJI->hasCustomJumpTables())
824 return;
825
826 const std::vector<llvm::MachineJumpTableEntry> &JT =
827 MJTI->getJumpTables();
828 if (JT.empty())
829 return;
830
831 unsigned NumEntries = 0;
832 for (int i = 0, e = JT.size(); i != e; i++)
833 NumEntries += JT[i].MBBs.size();
834
835 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
836
837 mpJumpTable = MJTI;
838 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
839 MJTI->getEntryAlignment(*mpTD));
840
841 return;
842}
843
844
// Fills the memory reserved by initJumpTableInfo() with the final addresses
// of the emitted machine basic blocks — one pointer-sized slot per jump
// table target.
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  // sizeof(mpTD) is used here purely as "size of a host pointer"; the
  // check asserts that the target's jump-table entry is host-pointer
  // sized, i.e. we are not cross JIT'ing.
  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}
869
870
// Resolves @V (function, global variable, or alias) to a runtime address.
//
// @V              the global value to resolve.
// @Reference      the code location that will refer to the address
//                 (currently unused in this path).
// @MayNeedFarStub when true, a function is conservatively routed through a
//                 lazy stub instead of a direct pointer.
//
// Aborts via llvm_unreachable for value kinds it cannot resolve.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        //   we're returning the same address for the function as any
        //   previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        //
        // x86_64 architecture may encounter the bug:
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      // Resolve the alias to its ultimate aliasee, then dispatch on what
      // that turned out to be.
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          //            code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          if (void *P = mGlobalAddressMap[GV])
            return P;

          // Not emitted yet: emit it now, then read the mapping that
          // EmitGlobalVariable installed.
          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  llvm_unreachable("Unknown type of global value!");
}
960
961
962// If the specified function has been code-gen'd, return a pointer to the
963// function. If not, compile it, or use a stub to implement lazy compilation
964// if available.
965void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
966 // If we have already code generated the function, just return the
967 // address.
968 if (void *Addr = GetPointerToGlobalIfAvailable(F))
969 return Addr;
970
971 // Get a stub if the target supports it.
972 return GetLazyFunctionStub(F);
973}
974
975
// Returns the (possibly freshly emitted) lazy stub for @F.  External
// functions are resolved eagerly here — not at first call as a classic
// lazy JIT would — and only functions whose target could not be resolved
// yet are recorded in PendingFunctions for later patching.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    // Target unknown: remember the stub so it can be patched once F is
    // actually emitted.
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}
1010
1011
// Returns the address of @F if it is already code-gen'd, or resolves it as
// an external symbol (runtime library first, then the client's lookup
// callback).  Only externally-defined functions may take the symbol-lookup
// path — see the assert.
//
// @AbortOnFailure  when true, an unresolvable symbol is a fatal error;
//                  otherwise NULL is returned and the caller decides.
void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
                                        bool AbortOnFailure) {
  void *Addr = GetPointerToGlobalIfAvailable(F);
  if (Addr)
    return Addr;

  assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
         "Internal error: only external defined function routes here!");

  // Handle the failure resolution by ourselves.
  Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
                                 /* AbortOnFailure = */ false);

  // If we resolved the symbol to a null address (eg. a weak external)
  // return a null pointer let the application handle it.
  if (Addr == NULL) {
    if (AbortOnFailure)
      llvm::report_fatal_error("Could not resolve external function "
                               "address: " + F->getName());
    else
      return NULL;
  }

  // Cache the resolved address so subsequent lookups are direct.
  AddGlobalMapping(F, Addr);

  return Addr;
}
1039
1040
1041void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
1042 bool AbortOnFailure) {
1043 if (void *Addr = FindRuntimeFunction(Name.c_str()))
1044 return Addr;
1045
1046 if (mpSymbolLookupFn)
1047 if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1048 return Addr;
1049
1050 if (AbortOnFailure)
1051 llvm::report_fatal_error("Program used external symbol '" + Name +
1052 "' which could not be resolved!");
1053
1054 return NULL;
1055}
1056
1057
1058// Return the address of the specified global variable, possibly emitting it
1059// to memory if needed. This is used by the Emitter.
1060void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1061 void *Ptr = GetPointerToGlobalIfAvailable(GV);
1062 if (Ptr)
1063 return Ptr;
1064
1065 if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1066 // If the global is external, just remember the address.
1067 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1068 AddGlobalMapping(GV, Ptr);
1069 } else {
1070 // If the global hasn't been emitted to memory yet, allocate space and
1071 // emit it into memory.
1072 Ptr = GetMemoryForGV(GV);
1073 AddGlobalMapping(GV, Ptr);
1074 EmitGlobalVariable(GV);
1075 }
1076
1077 return Ptr;
1078}
1079
1080
// This method abstracts memory allocation of global variable so that the
// JIT can allocate thread local variables depending on the target.
//
// Returns storage of the pointee type's alloc size, aligned to the
// preferred alignment reported by the target data.  TLS globals are
// rejected with a fatal error.
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We can support TLS by
    //
    //  Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to.
    // (should we disable this in the front-end (i.e., slang)?).
    llvm::report_fatal_error
        ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      // Assumes malloc returns at least 8-byte-aligned storage, so no
      // manual alignment is needed here.
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      // NOTE(review): the original (unadjusted) malloc pointer is
      // discarded, so this block can never be passed to free(); confirm
      // that globals are intended to live for the process lifetime.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
            (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    // Let the code memory manager place the global.
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}
1117
1118
1119void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1120 void *GA = GetPointerToGlobalIfAvailable(GV);
1121
1122 if (GV->isThreadLocal())
1123 llvm::report_fatal_error
1124 ("We don't support Thread Local Storage (TLS)!");
1125
1126 if (GA == NULL) {
1127 // If it's not already specified, allocate memory for the global.
1128 GA = GetMemoryForGV(GV);
1129 AddGlobalMapping(GV, GA);
1130 }
1131
1132 InitializeConstantToMemory(GV->getInitializer(), GA);
1133
1134 // You can do some statistics on global variable here.
1135 return;
1136}
1137
1138
1139void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1140 // Make sure GV is emitted first, and create a stub containing the fully
1141 // resolved address.
1142 void *GVAddress = GetPointerToGlobal(V, Reference, false);
1143
1144 // If we already have a stub for this global variable, recycle it.
1145 void *&IndirectSym = GlobalToIndirectSymMap[V];
1146 // Otherwise, codegen a new indirect symbol.
1147 if (!IndirectSym)
1148 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1149
1150 return IndirectSym;
1151}
1152
1153
1154// Return a stub for the function at the specified address.
1155void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
1156 void *&Stub = ExternalFnToStubMap[FnAddr];
1157 if (Stub)
1158 return Stub;
1159
1160 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1161 startGVStub(0, SL.Size, SL.Alignment);
1162 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1163 finishGVStub();
1164
1165 return Stub;
1166}
1167
1168
// Dump a human-readable disassembly of the code region
// [Start, Start + Length) under the label Name. IsStub only changes the
// printed header. Compiles to a no-op unless USE_DISASSEMBLER is set;
// output goes to a file or to stdout depending on USE_DISASSEMBLER_FILE.
void CodeEmitter::Disassemble(const llvm::StringRef &Name,
                              uint8_t *Start, size_t Length, bool IsStub) {

#if USE_DISASSEMBLER
  llvm::raw_ostream *OS;

#if USE_DISASSEMBLER_FILE
  // Append the disassembly to a fixed dump file on the device.
  std::string ErrorInfo;
  OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
                                ErrorInfo,
                                llvm::raw_fd_ostream::F_Append);

  if (!ErrorInfo.empty()) {    // some errors occurred
    // LOGE("Error in creating disassembly file");
    delete OS;
    return;
  }
#else
  OS = &llvm::outs();
#endif

  *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
      << "\n";

  // Lazily create the MC-layer helpers (asm info, disassembler, printer)
  // on first use; they are cached in members and reused across calls.
  if (mpAsmInfo == NULL)
    mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
  if (mpDisassmbler == NULL)
    mpDisassmbler = mpTarget->createMCDisassembler();
  if (mpIP == NULL)
    mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
                                         *mpAsmInfo);

  // Wrap the raw code bytes in a MemoryObject for the disassembler.
  const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
                                                                Length);
  // Size: byte length of the instruction decoded in each iteration.
  // NOTE(review): Size is uninitialized here; the loop relies on
  // getInstruction() writing it even on failure -- confirm that holds for
  // the target disassembler.
  uint64_t Size;
  uint64_t Index;

  for (Index = 0; Index < Length; Index += Size) {
    llvm::MCInst Inst;

    if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
                                      /* REMOVED */ llvm::nulls())) {
      // Print "<address>: 0x<raw word>  <instruction>".
      // NOTE(review): the (uint32_t) casts assume a 32-bit address space
      // and a 4-byte instruction word -- revisit if ported to 64-bit.
      (*OS).indent(4)
           .write("0x", 2)
           .write_hex((uint32_t) Start + Index)
           .write(": 0x", 4);
      (*OS).write_hex((uint32_t) *(uint32_t*)(Start+Index));
      mpIP->printInst(&Inst, *OS);
      *OS << "\n";
    } else {
      if (Size == 0)
        Size = 1; // skip illegible bytes
    }
  }

  *OS << "\n";
  delete BufferMObj;

#if USE_DISASSEMBLER_FILE
  // If you want the disassemble results write to file, uncomment this.
  ((llvm::raw_fd_ostream*)OS)->close();
  delete OS;
#endif

#endif  // USE_DISASSEMBLER
}
1235
1236
1237void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
1238 // Set Target
1239 mpTarget = &TM.getTarget();
1240 // Set TargetJITInfo
1241 mpTJI = TM.getJITInfo();
1242 // set TargetData
1243 mpTD = TM.getTargetData();
1244
1245 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
1246
1247 return;
1248}
1249
1250
1251// This callback is invoked when the specified function is about to be code
1252// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // Code memory must be writable while emitting; finishFunction() flips it
  // back to executable.
  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Lazily create the bookkeeping record for the function being emitted;
  // finishFunction() fills in the final size and publishes it.
  if (mpCurEmitFunction == NULL) {
    mpCurEmitFunction = new FuncInfo(); // TODO(all): Allocation check!
    mpCurEmitFunction->name = NULL;
    mpCurEmitFunction->addr = NULL;
    mpCurEmitFunction->size = 0;
  }

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  // NOTE(review): the code aligns to 16 bytes, stricter than the comment
  // above claims -- presumably intentional; confirm before changing.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  // Record the function's entry address now that alignment padding has
  // been emitted.
  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->addr = CurBufferPtr;

  // Forget basic-block addresses left over from the previous function.
  mMBBLocations.clear();
}
1298
1299
1300// This callback is invoked when the specified function has finished code
1301// generation. If a buffer overflow has occurred, this method returns true
1302// (the callee is required to try again).
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // No enough memory
    // NOTE(review): the contract documented above says a buffer overflow
    // returns true so the caller retries, yet this path returns false --
    // confirm whether that is intentional.
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    //ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          // Named external symbol: resolved via the runtime table or the
          // user lookup callback; aborts when it cannot be resolved.
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.
#if 0
          // Cache the relocation result address
          mCachingRelocations.push_back(
              oBCCRelocEntry(MR.getRelocationType(),
                             MR.getMachineCodeOffset() + BufferOffset,
                             ResultPtr));
#endif
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Let the target apply the resolved relocations to the emitted code.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  // Publish the finished function's info under its name; startFunction()
  // will create a fresh FuncInfo for the next function.
  if (F.getFunction()->hasName()) {
    string const &name = F.getFunction()->getNameStr();
    mpResult->mEmittedFunctions[name] = mpCurEmitFunction;
    mpCurEmitFunction = NULL;
  }

  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // Back-patch this function's lazy stub (if one is pending) to jump to
  // the freshly emitted code.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}
1412
1413
1414void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
1415 unsigned Alignment) {
1416 mpSavedBufferBegin = BufferBegin;
1417 mpSavedBufferEnd = BufferEnd;
1418 mpSavedCurBufferPtr = CurBufferPtr;
1419
1420 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
1421 Alignment);
1422 BufferEnd = BufferBegin + StubSize + 1;
1423
1424 return;
1425}
1426
1427
1428void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1429 mpSavedBufferBegin = BufferBegin;
1430 mpSavedBufferEnd = BufferEnd;
1431 mpSavedCurBufferPtr = CurBufferPtr;
1432
1433 BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
1434 BufferEnd = BufferBegin + StubSize + 1;
1435
1436 return;
1437}
1438
1439
1440void CodeEmitter::finishGVStub() {
1441 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1442
1443 // restore
1444 BufferBegin = mpSavedBufferBegin;
1445 BufferEnd = mpSavedBufferEnd;
1446 CurBufferPtr = mpSavedCurBufferPtr;
1447}
1448
1449
1450// Allocates and fills storage for an indirect GlobalValue, and returns the
1451// address.
1452void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
1453 const uint8_t *Buffer, size_t Size,
1454 unsigned Alignment) {
1455 uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
1456 memcpy(IndGV, Buffer, Size);
1457 return IndGV;
1458}
1459
1460
1461// Allocate memory for a global. Unlike allocateSpace, this method does not
1462// allocate memory in the current output buffer, because a global may live
1463// longer than the current function.
1464void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1465 // Delegate this call through the memory manager.
1466 return mpMemMgr->allocateGlobal(Size, Alignment);
1467}
1468
1469
1470// This should be called by the target when a new basic block is about to be
1471// emitted. This way the MCE knows where the start of the block is, and can
1472// implement getMachineBasicBlockAddress.
1473void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
1474 if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
1475 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
1476 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
1477 return;
1478}
1479
1480
1481// Return the address of the jump table with index @Index in the function
1482// that last called initJumpTableInfo.
1483uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
1484 const std::vector<llvm::MachineJumpTableEntry> &JT =
1485 mpJumpTable->getJumpTables();
1486
1487 assert((Index < JT.size()) && "Invalid jump table index!");
1488
1489 unsigned int Offset = 0;
1490 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
1491
1492 for (unsigned i = 0; i < Index; i++)
1493 Offset += JT[i].MBBs.size();
1494 Offset *= EntrySize;
1495
1496 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
1497}
1498
1499
1500// Return the address of the specified MachineBasicBlock, only usable after
1501// the label for the MBB has been emitted.
1502uintptr_t CodeEmitter::getMachineBasicBlockAddress(
1503 llvm::MachineBasicBlock *MBB) const {
1504 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
1505 mMBBLocations[MBB->getNumber()] &&
1506 "MBB not emitted!");
1507 return mMBBLocations[MBB->getNumber()];
1508}
1509
1510
1511void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
1512 // Get the empty stub we generated earlier.
1513 void *Stub;
1514 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
1515 if (I != PendingFunctions.end())
1516 Stub = mFunctionToLazyStubMap[F];
1517 else
1518 return;
1519
1520 void *Addr = GetPointerToGlobalIfAvailable(F);
1521
1522 assert(Addr != Stub &&
1523 "Function must have non-stub address to be updated.");
1524
1525 // Tell the target jit info to rewrite the stub at the specified address,
1526 // rather than creating a new one.
1527 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1528 startGVStub(Stub, SL.Size);
1529 mpTJI->emitFunctionStub(F, Addr, *this);
1530 finishGVStub();
1531
1532 Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
1533 SL.Size, true);
1534
1535 PendingFunctions.erase(I);
1536}
1537
1538
1539} // namespace bcc