blob: 0029909daf77adc8e4a6781f4b861190a2a902b0 [file] [log] [blame]
Shih-wei Liao67d8f372011-01-16 22:48:35 -08001//===-- CodeEmitter.cpp - CodeEmitter Class -------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See external/llvm/LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the CodeEmitter class.
11//
12//===----------------------------------------------------------------------===//
13
14#define LOG_TAG "bcc"
15#include <cutils/log.h>
16
17#include "CodeEmitter.h"
18
19#include "Config.h"
20
21#include "CodeMemoryManager.h"
22#include "Runtime.h"
23#include "ScriptCompiled.h"
24
25#include <bcc/bcc.h>
26#include <bcc/bcc_cache.h>
27#include "bcc_internal.h"
28
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/APInt.h"
31#include "llvm/ADT/DenseMap.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringRef.h"
34
35#include "llvm/CodeGen/MachineBasicBlock.h"
36#include "llvm/CodeGen/MachineConstantPool.h"
37#include "llvm/CodeGen/MachineFunction.h"
38#include "llvm/CodeGen/MachineModuleInfo.h"
39#include "llvm/CodeGen/MachineRelocation.h"
40#include "llvm/CodeGen/MachineJumpTableInfo.h"
41#include "llvm/CodeGen/JITCodeEmitter.h"
42
43#include "llvm/ExecutionEngine/GenericValue.h"
44
45#include "llvm/MC/MCAsmInfo.h"
46#include "llvm/MC/MCDisassembler.h"
47#include "llvm/MC/MCInst.h"
48#include "llvm/MC/MCInstPrinter.h"
49
50#include "llvm/Support/ErrorHandling.h"
51#include "llvm/Support/raw_ostream.h"
52
53#if USE_DISASSEMBLER
54#include "llvm/Support/MemoryObject.h"
55#endif
56
Logan Chienc4ea07f2011-03-09 17:27:50 +080057#include "llvm/Support/Host.h"
Shih-wei Liao67d8f372011-01-16 22:48:35 -080058
59#include "llvm/Target/TargetData.h"
60#include "llvm/Target/TargetMachine.h"
61#include "llvm/Target/TargetRegistry.h"
62#include "llvm/Target/TargetJITInfo.h"
63
64#include "llvm/Constant.h"
65#include "llvm/Constants.h"
66#include "llvm/DerivedTypes.h"
67#include "llvm/Function.h"
68#include "llvm/GlobalAlias.h"
69#include "llvm/GlobalValue.h"
70#include "llvm/GlobalVariable.h"
71#include "llvm/Instruction.h"
72#include "llvm/Type.h"
73
74#include <algorithm>
75#include <vector>
76#include <set>
77#include <string>
78
79#include <stddef.h>
80
81
namespace {

#if USE_DISASSEMBLER
// Adapts a raw (pointer, length) byte buffer to the llvm::MemoryObject
// interface so the MC disassembler can read instruction bytes from it.
// The object does NOT own the buffer; the caller must keep it alive.
class BufferMemoryObject : public llvm::MemoryObject {
private:
  const uint8_t *mBytes;   // borrowed pointer to the code bytes
  uint64_t mLength;        // number of valid bytes starting at mBytes

public:
  BufferMemoryObject(const uint8_t *Bytes, uint64_t Length)
    : mBytes(Bytes), mLength(Length) {
  }

  virtual uint64_t getBase() const { return 0; }
  virtual uint64_t getExtent() const { return mLength; }

  // Reads the byte at @Addr into @Byte; returns 0 on success, -1 if @Addr
  // is outside the buffer. Valid addresses are [getBase(), getBase() +
  // getExtent()), i.e. [0, mLength) here.
  virtual int readByte(uint64_t Addr, uint8_t *Byte) const {
    // Fixed off-by-one: the old check (Addr > getExtent()) allowed
    // Addr == mLength, which would read one byte past the buffer.
    if (Addr >= getExtent())
      return -1;
    *Byte = mBytes[Addr];
    return 0;
  }
};
#endif

} // namespace anonymous
108
109
110namespace bcc {
111
// Constructs a CodeEmitter that emits into @pMemMgr and records results
// (functions, globals, stubs) into @result.
//
// Takes ownership of @pMemMgr. Note that @result is only referenced, not
// owned. All target-specific state (mpTarget/mpTJI/mpTD) starts NULL and is
// presumably filled in before emission begins — confirm against the caller.
CodeEmitter::CodeEmitter(ScriptCompiled *result, CodeMemoryManager *pMemMgr)
    : mpResult(result),
      mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
#if USE_DISASSEMBLER
      // Disassembler helpers are created lazily; freed in the destructor.
      mpAsmInfo(NULL),
      mpDisassmbler(NULL),
      mpIP(NULL),
#endif
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}
131
132
// Releases the disassembler helper objects created for instruction dumping.
//
// NOTE(review): the constructor comment says this class takes ownership of
// the CodeMemoryManager, but it is not deleted here — presumably released
// by whoever owns the CodeEmitter; confirm to rule out a leak.
CodeEmitter::~CodeEmitter() {
#if USE_DISASSEMBLER
  delete mpAsmInfo;
  delete mpDisassmbler;
  delete mpIP;
#endif
}
140
141
142// Once you finish the compilation on a translation unit, you can call this
143// function to recycle the memory (which is used at compilation time and not
144// needed for runtime).
145//
146// NOTE: You should not call this funtion until the code-gen passes for a
147// given module is done. Otherwise, the results is undefined and may
148// cause the system crash!
149void CodeEmitter::releaseUnnecessary() {
150 mMBBLocations.clear();
151 mLabelLocations.clear();
152 mGlobalAddressMap.clear();
153 mFunctionToLazyStubMap.clear();
154 GlobalToIndirectSymMap.clear();
155 ExternalFnToStubMap.clear();
156 PendingFunctions.clear();
157}
158
159
160void CodeEmitter::reset() {
161 releaseUnnecessary();
162
163 mpResult = NULL;
164
165 mpSymbolLookupFn = NULL;
166 mpSymbolLookupContext = NULL;
167
168 mpTJI = NULL;
169 mpTD = NULL;
170
171 mpMemMgr->reset();
172}
173
174
175void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
176 if (Addr == NULL) {
177 // Removing mapping
178 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
179 void *OldVal;
180
181 if (I == mGlobalAddressMap.end()) {
182 OldVal = NULL;
183 } else {
184 OldVal = I->second;
185 mGlobalAddressMap.erase(I);
186 }
187
188 return OldVal;
189 }
190
191 void *&CurVal = mGlobalAddressMap[GV];
192 void *OldVal = CurVal;
193
194 CurVal = Addr;
195
196 return OldVal;
197}
198
199
200unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
201 llvm::MachineConstantPool *MCP) {
202 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
203 MCP->getConstants();
204
205 if (Constants.empty())
206 return 0;
207
208 unsigned int Size = 0;
209 for (int i = 0, e = Constants.size(); i != e; i++) {
210 llvm::MachineConstantPoolEntry CPE = Constants[i];
211 unsigned int AlignMask = CPE.getAlignment() - 1;
212 Size = (Size + AlignMask) & ~AlignMask;
213 const llvm::Type *Ty = CPE.getType();
214 Size += mpTD->getTypeAllocSize(Ty);
215 }
216
217 return Size;
218}
219
// This function converts a Constant* into a GenericValue. The interesting
// part is if C is a ConstantExpr.
//
// Only the GenericValue field matching @C's type (IntVal / FloatVal /
// DoubleVal / PointerVal) is written; other fields are left untouched.
// Constant expressions are folded recursively. Any constant kind this
// function cannot handle terminates compilation via report_fatal_error().
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  // Undef: leave Result as-is — any bit pattern is an acceptable undef.
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the index
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
                                                 &Indices[0],
                                                 Indices.size());

        // Evaluate the base pointer, then add the byte offset to it.
        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      // Integer width conversions operate on Result.IntVal in place.
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        // Unsigned int -> float/double/x86 long double.
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          // 80-bit values are carried as a raw APInt bit pattern.
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        // Signed variant of UIToFP (second convertFromAPInt arg = true).
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          // Reinterpret the stored bit pattern as an 80-bit float and
          // convert toward zero (C-style truncation).
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V; // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
                                    (Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        // Normalize the integer to pointer width before materializing it.
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        // Bit-for-bit reinterpretation between the GenericValue fields.
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            assert(DestTy->isFloatingPointTy() && "invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            assert(DestTy->isPointerTy() && "Invalid bitcast");
            break; // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      // Binary operators: fold both operands, then dispatch on the operand
      // type (integer / float / double / long-double-as-APInt).
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          // Extended-precision floats travel as APInt bit patterns and are
          // computed through APFloat.
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        } // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    } // End switch (CE->getOpcode())

    // Any ConstantExpr opcode not handled above is a hard error.
    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  } // C->getValueID() == llvm::Value::ConstantExprVal

  // Plain (non-expression) constants: dispatch on the constant's type.
  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      // Extended-precision constants are carried as raw bit patterns.
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          // Resolve to emitted code, or to a lazy stub if none exists yet.
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          // Resolve the global's address, emitting it first if necessary.
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          assert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}
638
639
// Stores the data in @Val of type @Ty at address @Addr.
//
// Exactly getTypeStoreSize(Ty) bytes are written. Integer values are
// serialized from the APInt's 64-bit words; a final std::reverse fixes up
// any host/target endianness mismatch for all type kinds.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        // Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        // Copy whole 64-bit words, placing later source words earlier in
        // the destination.
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // Copy the remaining partial word (its low-order i bytes).
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // x86 long double: only the 10 significant bytes of the bit pattern
      // are stored.
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      // NOTE(review): other type kinds are silently ignored here —
      // presumably unreachable for first-class types; confirm.
      break;
    }
  }

  // Swap bytes when the host and target disagree on endianness.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}
710
711
712// Recursive function to apply a @Constant value into the specified memory
713// location @Addr.
714void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
715 switch (C->getValueID()) {
716 case llvm::Value::UndefValueVal: {
717 // Nothing to do
718 break;
719 }
720 case llvm::Value::ConstantVectorVal: {
721 // dynamic cast may hurt performance
722 const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;
723
724 unsigned int ElementSize = mpTD->getTypeAllocSize
725 (CP->getType()->getElementType());
726
727 for (int i = 0, e = CP->getNumOperands(); i != e;i++)
728 InitializeConstantToMemory(
729 CP->getOperand(i),
730 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
731 break;
732 }
733 case llvm::Value::ConstantAggregateZeroVal: {
734 memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
735 break;
736 }
737 case llvm::Value::ConstantArrayVal: {
738 const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
739 unsigned int ElementSize = mpTD->getTypeAllocSize
740 (CPA->getType()->getElementType());
741
742 for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
743 InitializeConstantToMemory(
744 CPA->getOperand(i),
745 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
746 break;
747 }
748 case llvm::Value::ConstantStructVal: {
749 const llvm::ConstantStruct *CPS =
750 static_cast<const llvm::ConstantStruct*>(C);
751 const llvm::StructLayout *SL = mpTD->getStructLayout
752 (llvm::cast<llvm::StructType>(CPS->getType()));
753
754 for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
755 InitializeConstantToMemory(
756 CPS->getOperand(i),
757 reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
758 break;
759 }
760 default: {
761 if (C->getType()->isFirstClassType()) {
762 llvm::GenericValue Val;
763 GetConstantValue(C, Val);
764 StoreValueToMemory(Val, Addr, C->getType());
765 } else {
766 llvm_unreachable("Unknown constant type to initialize memory "
767 "with!");
768 }
769 break;
770 }
771 }
772 return;
773}
774
775
// Lays out the machine constant pool @MCP into executable memory and
// records each entry's final address in mConstPoolAddresses.
//
// No-op when the target resolves constant pools itself, or (on ARM builds)
// entirely compiled out by the preprocessor guard below.
void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  // Reserve one aligned slab big enough for every entry.
  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return; // out of memory

  // Write each entry at its aligned offset within the slab.
  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    // Round up to this entry's alignment (a power of two).
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    // Target-specific (machine) constant pool entries are not supported.
    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error
        ("Initialize memory with machine specific constant pool"
         " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    const llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}
820
821
822void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
823 if (mpTJI->hasCustomJumpTables())
824 return;
825
826 const std::vector<llvm::MachineJumpTableEntry> &JT =
827 MJTI->getJumpTables();
828 if (JT.empty())
829 return;
830
831 unsigned NumEntries = 0;
832 for (int i = 0, e = JT.size(); i != e; i++)
833 NumEntries += JT[i].MBBs.size();
834
835 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
836
837 mpJumpTable = MJTI;
838 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
839 MJTI->getEntryAlignment(*mpTD));
840
841 return;
842}
843
844
// Fills the memory reserved by initJumpTableInfo() with the final addresses
// of the emitted MachineBasicBlocks, one pointer-sized slot per target.
//
// No-op when the target handles its own jump tables, there are no tables,
// or the earlier allocation failed (mpJumpTableBase == 0).
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  // sizeof(mpTD) is the size of a host pointer (mpTD is itself a pointer
  // variable) — the assert requires entries to be exactly pointer-sized,
  // i.e. no cross-word-size JIT'ing.
  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}
869
870
// Resolves a GlobalValue @V to a runtime address, emitting code/data or a
// stub as needed.
//
// @Reference is the location referencing @V (currently unused here) and
// @MayNeedFarStub indicates the reference might not reach a direct address,
// forcing a stub for functions. Unresolvable value kinds abort via
// llvm_unreachable.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        // we're returning the same address for the function as any
        // previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        // guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        //
        // x86_64 architecture may encounter the bug:
        // http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      // Globals: resolve (emitting storage if needed) via the GV path.
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      // Aliases: resolve down to the ultimate aliasee, then dispatch on
      // what that turned out to be.
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          // code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          // Emit the variable on first use, then return its mapping.
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  // Falling out of the switch means @V was not a resolvable global kind.
  llvm_unreachable("Unknown type of global value!");
}
960
961
962// If the specified function has been code-gen'd, return a pointer to the
963// function. If not, compile it, or use a stub to implement lazy compilation
964// if available.
965void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
966 // If we have already code generated the function, just return the
967 // address.
968 if (void *Addr = GetPointerToGlobalIfAvailable(F))
969 return Addr;
970
971 // Get a stub if the target supports it.
972 return GetLazyFunctionStub(F);
973}
974
975
// Returns the lazy stub for @F, emitting a new one on first request.
//
// External/available-externally functions are resolved to their real
// address up front, and the stub calls that address directly; unresolved
// functions are queued in PendingFunctions. The stub address (not the
// target address) is what gets recorded in the global address map, so
// callers always see a stable address for @F.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  // (start/emit/finish must stay in this exact order.)
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}
1010
1011
1012void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
1013 bool AbortOnFailure) {
1014 void *Addr = GetPointerToGlobalIfAvailable(F);
1015 if (Addr)
1016 return Addr;
1017
1018 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
1019 "Internal error: only external defined function routes here!");
1020
1021 // Handle the failure resolution by ourselves.
1022 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
1023 /* AbortOnFailure = */ false);
1024
1025 // If we resolved the symbol to a null address (eg. a weak external)
1026 // return a null pointer let the application handle it.
1027 if (Addr == NULL) {
1028 if (AbortOnFailure)
1029 llvm::report_fatal_error("Could not resolve external function "
1030 "address: " + F->getName());
1031 else
1032 return NULL;
1033 }
1034
1035 AddGlobalMapping(F, Addr);
1036
1037 return Addr;
1038}
1039
1040
1041void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
1042 bool AbortOnFailure) {
1043 if (void *Addr = FindRuntimeFunction(Name.c_str()))
1044 return Addr;
1045
1046 if (mpSymbolLookupFn)
1047 if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1048 return Addr;
1049
1050 if (AbortOnFailure)
1051 llvm::report_fatal_error("Program used external symbol '" + Name +
1052 "' which could not be resolved!");
1053
1054 return NULL;
1055}
1056
1057
1058// Return the address of the specified global variable, possibly emitting it
1059// to memory if needed. This is used by the Emitter.
1060void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1061 void *Ptr = GetPointerToGlobalIfAvailable(GV);
1062 if (Ptr)
1063 return Ptr;
1064
1065 if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1066 // If the global is external, just remember the address.
1067 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1068 AddGlobalMapping(GV, Ptr);
1069 } else {
1070 // If the global hasn't been emitted to memory yet, allocate space and
1071 // emit it into memory.
1072 Ptr = GetMemoryForGV(GV);
1073 AddGlobalMapping(GV, Ptr);
1074 EmitGlobalVariable(GV);
1075 }
1076
1077 return Ptr;
1078}
1079
1080
// This method abstracts memory allocation of global variable so that the
// JIT can allocate thread local variables depending on the target.
//
// Returns storage sized/aligned for @GV's pointee type. TLS globals are
// rejected with a fatal error.
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We can support TLS by
    //
    // Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to.
    // (should we disable this in the front-end (i.e., slang)?).
    llvm::report_fatal_error
      ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      // Relies on malloc's natural alignment covering A <= 8.
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      // NOTE(review): the adjusted pointer returned here is not the one
      // malloc handed out, so it cannot be passed to free(); presumably
      // this memory is intentionally never freed — confirm.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
          (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    // Default path: memory manager owns the allocation.
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}
1117
1118
1119void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1120 void *GA = GetPointerToGlobalIfAvailable(GV);
1121
1122 if (GV->isThreadLocal())
1123 llvm::report_fatal_error
1124 ("We don't support Thread Local Storage (TLS)!");
1125
1126 if (GA == NULL) {
1127 // If it's not already specified, allocate memory for the global.
1128 GA = GetMemoryForGV(GV);
1129 AddGlobalMapping(GV, GA);
1130 }
1131
1132 InitializeConstantToMemory(GV->getInitializer(), GA);
1133
1134 // You can do some statistics on global variable here.
1135 return;
1136}
1137
1138
1139void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1140 // Make sure GV is emitted first, and create a stub containing the fully
1141 // resolved address.
1142 void *GVAddress = GetPointerToGlobal(V, Reference, false);
1143
1144 // If we already have a stub for this global variable, recycle it.
1145 void *&IndirectSym = GlobalToIndirectSymMap[V];
1146 // Otherwise, codegen a new indirect symbol.
1147 if (!IndirectSym)
1148 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1149
1150 return IndirectSym;
1151}
1152
1153
1154// Return a stub for the function at the specified address.
1155void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
1156 void *&Stub = ExternalFnToStubMap[FnAddr];
1157 if (Stub)
1158 return Stub;
1159
1160 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1161 startGVStub(0, SL.Size, SL.Alignment);
1162 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1163 finishGVStub();
1164
1165 return Stub;
1166}
1167
1168
// Dump a disassembly listing of the machine code in [Start, Start+Length)
// under the label @Name. Compiled out entirely unless USE_DISASSEMBLER is
// set; output goes to a scratch file or llvm::outs() depending on
// USE_DISASSEMBLER_FILE.
void CodeEmitter::Disassemble(const llvm::StringRef &Name,
                              uint8_t *Start, size_t Length, bool IsStub) {

#if USE_DISASSEMBLER
  llvm::raw_ostream *OS;

#if USE_DISASSEMBLER_FILE
  // Append the listing to a fixed scratch file on the device.
  std::string ErrorInfo;
  OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
                                ErrorInfo,
                                llvm::raw_fd_ostream::F_Append);

  if (!ErrorInfo.empty()) {    // some errors occurred
    // LOGE("Error in creating disassembly file");
    delete OS;
    return;
  }
#else
  OS = &llvm::outs();
#endif

  *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
      << "\n";

  // Lazily create the MC helper objects (asm info, disassembler, printer)
  // on the first call and reuse them for subsequent calls.
  if (mpAsmInfo == NULL)
    mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
  if (mpDisassmbler == NULL)
    mpDisassmbler = mpTarget->createMCDisassembler();
  if (mpIP == NULL)
    mpIP = mpTarget->createMCInstPrinter(*mpTargetMachine,
                                         mpAsmInfo->getAssemblerDialect(),
                                         *mpAsmInfo);

  const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
                                                                Length);
  uint64_t Size;
  uint64_t Index;

  // Decode one instruction at a time; Size is set by getInstruction() to
  // the byte length of the decoded instruction.
  for (Index = 0; Index < Length; Index += Size) {
    llvm::MCInst Inst;

    if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
        /* REMOVED */ llvm::nulls())) {
      // Print "<addr>: 0x<raw word> <instruction>".
      // NOTE(review): the (uint32_t) casts truncate pointers on 64-bit
      // hosts, and the raw dump reads a fixed 4-byte word regardless of
      // the instruction's actual size -- confirm this is ARM-only code.
      (*OS).indent(4)
           .write("0x", 2)
           .write_hex((uint32_t) Start + Index)
           .write(": 0x", 4);
      (*OS).write_hex((uint32_t) *(uint32_t*)(Start+Index));
      mpIP->printInst(&Inst, *OS);
      *OS << "\n";
    } else {
      if (Size == 0)
        Size = 1;  // skip illegible bytes
    }
  }

  *OS << "\n";
  delete BufferMObj;

#if USE_DISASSEMBLER_FILE
  // If you want the disassemble results write to file, uncomment this.
  ((llvm::raw_fd_ostream*)OS)->close();
  delete OS;
#endif

#endif  // USE_DISASSEMBLER
}
1236
1237
1238void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
Shih-wei Liaocfd23fd2011-04-19 18:02:28 -07001239 mpTargetMachine = &TM;
1240
Shih-wei Liao67d8f372011-01-16 22:48:35 -08001241 // Set Target
1242 mpTarget = &TM.getTarget();
1243 // Set TargetJITInfo
1244 mpTJI = TM.getJITInfo();
1245 // set TargetData
1246 mpTD = TM.getTargetData();
1247
1248 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
1249
1250 return;
1251}
1252
1253
// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // The code region must be writable while emitting; finishFunction()
  // flips it back to executable.
  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Lazily create the record describing this emitted function; it is
  // handed off to mpResult in finishFunction().
  if (mpCurEmitFunction == NULL) {
    mpCurEmitFunction = new FuncInfo();  // TODO(all): Allocation check!
    mpCurEmitFunction->name = NULL;
    mpCurEmitFunction->addr = NULL;
    mpCurEmitFunction->size = 0;
  }

  // Align the buffer to 16 bytes before emitting the constant pool and
  // jump table info.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  // Record where the function's machine code begins.
  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->addr = CurBufferPtr;

  // Basic-block addresses are per-function; reset them.
  mMBBLocations.clear();
}
1301
1302
// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // No enough memory.
    // NOTE(review): the header comment says overflow "returns true", but
    // this path returns false -- confirm against the caller's retry logic.
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  // Emit jump tables now that basic-block addresses are known.
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    //ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          // By-name lookup; fatal error if the symbol cannot be resolved.
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            // Out-of-range target: route the call through a stub.
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.
#if 0
          // Cache the relocation result address
          mCachingRelocations.push_back(
            oBCCRelocEntry(MR.getRelocationType(),
                           MR.getMachineCodeOffset() + BufferOffset,
                           ResultPtr));
#endif
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Let the target patch the instruction bytes with the resolved
    // addresses.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  // Publish the FuncInfo under the function's name; a fresh one will be
  // created by the next startFunction() call.
  if (F.getFunction()->hasName()) {
    string const &name = F.getFunction()->getNameStr();
    mpResult->mEmittedFunctions[name] = mpCurEmitFunction;
    mpCurEmitFunction = NULL;
  }

  // Per-function bookkeeping is done; reset for the next function.
  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // Patch this function's lazy stub (if any) to jump to the real code.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}
1415
1416
1417void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
1418 unsigned Alignment) {
1419 mpSavedBufferBegin = BufferBegin;
1420 mpSavedBufferEnd = BufferEnd;
1421 mpSavedCurBufferPtr = CurBufferPtr;
1422
1423 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
1424 Alignment);
1425 BufferEnd = BufferBegin + StubSize + 1;
1426
1427 return;
1428}
1429
1430
1431void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1432 mpSavedBufferBegin = BufferBegin;
1433 mpSavedBufferEnd = BufferEnd;
1434 mpSavedCurBufferPtr = CurBufferPtr;
1435
1436 BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
1437 BufferEnd = BufferBegin + StubSize + 1;
1438
1439 return;
1440}
1441
1442
1443void CodeEmitter::finishGVStub() {
1444 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1445
1446 // restore
1447 BufferBegin = mpSavedBufferBegin;
1448 BufferEnd = mpSavedBufferEnd;
1449 CurBufferPtr = mpSavedCurBufferPtr;
1450}
1451
1452
1453// Allocates and fills storage for an indirect GlobalValue, and returns the
1454// address.
1455void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
1456 const uint8_t *Buffer, size_t Size,
1457 unsigned Alignment) {
1458 uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
1459 memcpy(IndGV, Buffer, Size);
1460 return IndGV;
1461}
1462
1463
// Allocate memory for a global. Unlike allocateSpace, this method does not
// allocate memory in the current output buffer, because a global may live
// longer than the current function.
void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
  // Delegate this call through the memory manager.
  return mpMemMgr->allocateGlobal(Size, Alignment);
}
1471
1472
1473// This should be called by the target when a new basic block is about to be
1474// emitted. This way the MCE knows where the start of the block is, and can
1475// implement getMachineBasicBlockAddress.
1476void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
1477 if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
1478 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
1479 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
1480 return;
1481}
1482
1483
1484// Return the address of the jump table with index @Index in the function
1485// that last called initJumpTableInfo.
1486uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
1487 const std::vector<llvm::MachineJumpTableEntry> &JT =
1488 mpJumpTable->getJumpTables();
1489
1490 assert((Index < JT.size()) && "Invalid jump table index!");
1491
1492 unsigned int Offset = 0;
1493 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
1494
1495 for (unsigned i = 0; i < Index; i++)
1496 Offset += JT[i].MBBs.size();
1497 Offset *= EntrySize;
1498
1499 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
1500}
1501
1502
1503// Return the address of the specified MachineBasicBlock, only usable after
1504// the label for the MBB has been emitted.
1505uintptr_t CodeEmitter::getMachineBasicBlockAddress(
1506 llvm::MachineBasicBlock *MBB) const {
1507 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
1508 mMBBLocations[MBB->getNumber()] &&
1509 "MBB not emitted!");
1510 return mMBBLocations[MBB->getNumber()];
1511}
1512
1513
1514void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
1515 // Get the empty stub we generated earlier.
1516 void *Stub;
1517 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
1518 if (I != PendingFunctions.end())
1519 Stub = mFunctionToLazyStubMap[F];
1520 else
1521 return;
1522
1523 void *Addr = GetPointerToGlobalIfAvailable(F);
1524
1525 assert(Addr != Stub &&
1526 "Function must have non-stub address to be updated.");
1527
1528 // Tell the target jit info to rewrite the stub at the specified address,
1529 // rather than creating a new one.
1530 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1531 startGVStub(Stub, SL.Size);
1532 mpTJI->emitFunctionStub(F, Addr, *this);
1533 finishGVStub();
1534
1535 Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
1536 SL.Size, true);
1537
1538 PendingFunctions.erase(I);
1539}
1540
1541
1542} // namespace bcc