blob: ff19a69f427747cc9c38a3c2e9870ba099460cf4 [file] [log] [blame]
Logan28325bf2010-11-26 23:27:41 +08001/*
2 * Copyright 2010, The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "bcc_code_emitter.h"
18
Logan28325bf2010-11-26 23:27:41 +080019#include "bcc_code_mem_manager.h"
20#include "bcc_emitted_func_code.h"
Logan4eea1192010-11-26 23:31:57 +080021#include "bcc_runtime.h"
Logan28325bf2010-11-26 23:27:41 +080022
23#include <bcc/bcc.h>
24#include <bcc/bcc_cache.h>
25
26#include "llvm/ADT/APFloat.h"
27#include "llvm/ADT/APInt.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/StringRef.h"
31
32#include "llvm/CodeGen/MachineBasicBlock.h"
33#include "llvm/CodeGen/MachineConstantPool.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineModuleInfo.h"
36#include "llvm/CodeGen/MachineRelocation.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/JITCodeEmitter.h"
39
40#include "llvm/ExecutionEngine/GenericValue.h"
41
42#include "llvm/MC/MCAsmInfo.h"
43#include "llvm/MC/MCDisassembler.h"
44#include "llvm/MC/MCInst.h"
45#include "llvm/MC/MCInstPrinter.h"
46
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/raw_ostream.h"
49
Logan2037d722010-11-27 14:31:56 +080050#if defined(USE_DISASSEMBLER)
51#include "llvm/Support/MemoryObject.h"
52#endif
53
Logan28325bf2010-11-26 23:27:41 +080054#include "llvm/System/Host.h"
55
56#include "llvm/Target/TargetData.h"
57#include "llvm/Target/TargetMachine.h"
58#include "llvm/Target/TargetRegistry.h"
59#include "llvm/Target/TargetJITInfo.h"
60
61#include "llvm/Constant.h"
62#include "llvm/Constants.h"
63#include "llvm/DerivedTypes.h"
64#include "llvm/Function.h"
65#include "llvm/GlobalAlias.h"
66#include "llvm/GlobalValue.h"
67#include "llvm/GlobalVariable.h"
68#include "llvm/Instruction.h"
69#include "llvm/Type.h"
70
71#include <algorithm>
72#include <vector>
73#include <set>
74#include <string>
75
76#include <stddef.h>
77
78
namespace {

#if defined(USE_DISASSEMBLER)
// A trivial llvm::MemoryObject backed by a raw (pointer, length) buffer.
// It is used to feed already-emitted machine code bytes to the MC
// disassembler when disassembly dumps are enabled.
class BufferMemoryObject : public llvm::MemoryObject {
 private:
  const uint8_t *mBytes;   // not owned; must outlive this object
  uint64_t mLength;        // number of readable bytes starting at mBytes

 public:
  BufferMemoryObject(const uint8_t *Bytes, uint64_t Length)
      : mBytes(Bytes), mLength(Length) {
  }

  virtual uint64_t getBase() const { return 0; }
  virtual uint64_t getExtent() const { return mLength; }

  // Reads the byte at offset @Addr into *@Byte; returns 0 on success and
  // -1 when @Addr is out of range.
  virtual int readByte(uint64_t Addr, uint8_t *Byte) const {
    // FIX: valid offsets are [0, mLength).  The original check used '>',
    // which let Addr == getExtent() through and read one byte past the
    // end of the buffer.
    if (Addr >= getExtent())
      return -1;
    *Byte = mBytes[Addr];
    return 0;
  }
};
#endif

}  // namespace anonymous
105
106
Logan28325bf2010-11-26 23:27:41 +0800107namespace bcc {
108
// Will take the ownership of @MemMgr (it is freed in ~CodeEmitter).
// Every other member starts out NULL; they are populated later when a
// compilation is started (target info, jump tables, disassembler, ...)
// or when the client registers a symbol-lookup callback.
CodeEmitter::CodeEmitter(CodeMemoryManager *pMemMgr)
    : mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
#if defined(USE_DISASSEMBLER)
      // MC objects for disassembly dumps; only present in this build config.
      mpAsmInfo(NULL),
      mpDisassmbler(NULL),
      mpIP(NULL),
#endif
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}
127
128
// Releases the code memory manager (owned since construction) and, when
// the disassembler is compiled in, the MC helper objects used for
// disassembly dumps.
CodeEmitter::~CodeEmitter() {
  delete mpMemMgr;
#if defined(USE_DISASSEMBLER)
  delete mpAsmInfo;
  delete mpDisassmbler;
  delete mpIP;
#endif
}
137
138
// Once you finish the compilation on a translation unit, you can call this
// function to recycle the memory (which is used at compilation time and not
// needed for runtime).
//
// NOTE: You should not call this function until the code-gen passes for a
//       given module is done. Otherwise, the results is undefined and may
//       cause the system crash!
void CodeEmitter::releaseUnnecessary() {
  // All of these are compile-time bookkeeping only: basic-block/label
  // addresses, the global-address map and the stub maps are not consulted
  // by the generated code at runtime, so they can be dropped here.
  mMBBLocations.clear();
  mLabelLocations.clear();
  mGlobalAddressMap.clear();
  mFunctionToLazyStubMap.clear();
  GlobalToIndirectSymMap.clear();
  ExternalFnToStubMap.clear();
  PendingFunctions.clear();
}
155
156
157void CodeEmitter::reset() {
158 releaseUnnecessary();
159
160 mpSymbolLookupFn = NULL;
161 mpSymbolLookupContext = NULL;
162
163 mpTJI = NULL;
164 mpTD = NULL;
165
166 for (EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin(),
167 E = mEmittedFunctions.end();
168 I != E;
169 I++)
170 if (I->second != NULL)
171 delete I->second;
172 mEmittedFunctions.clear();
173
174 mpMemMgr->reset();
175}
176
177
178void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
179 if (Addr == NULL) {
180 // Removing mapping
181 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
182 void *OldVal;
183
184 if (I == mGlobalAddressMap.end()) {
185 OldVal = NULL;
186 } else {
187 OldVal = I->second;
188 mGlobalAddressMap.erase(I);
189 }
190
191 return OldVal;
192 }
193
194 void *&CurVal = mGlobalAddressMap[GV];
195 void *OldVal = CurVal;
196
197 CurVal = Addr;
198
199 return OldVal;
200}
201
202
203unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
204 llvm::MachineConstantPool *MCP) {
205 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
206 MCP->getConstants();
207
208 if (Constants.empty())
209 return 0;
210
211 unsigned int Size = 0;
212 for (int i = 0, e = Constants.size(); i != e; i++) {
213 llvm::MachineConstantPoolEntry CPE = Constants[i];
214 unsigned int AlignMask = CPE.getAlignment() - 1;
215 Size = (Size + AlignMask) & ~AlignMask;
216 const llvm::Type *Ty = CPE.getType();
217 Size += mpTD->getTypeAllocSize(Ty);
218 }
219
220 return Size;
221}
222
223// This function converts a Constant* into a GenericValue. The interesting
224// part is if C is a ConstantExpr.
225void CodeEmitter::GetConstantValue(const llvm::Constant *C,
226 llvm::GenericValue &Result) {
227 if (C->getValueID() == llvm::Value::UndefValueVal)
228 return;
229 else if (C->getValueID() == llvm::Value::ConstantExprVal) {
230 const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
231 const llvm::Constant *Op0 = CE->getOperand(0);
232
233 switch (CE->getOpcode()) {
234 case llvm::Instruction::GetElementPtr: {
235 // Compute the index
236 llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
237 CE->op_end());
238 uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
239 &Indices[0],
240 Indices.size());
241
242 GetConstantValue(Op0, Result);
243 Result.PointerVal =
244 static_cast<uint8_t*>(Result.PointerVal) + Offset;
245
246 return;
247 }
248 case llvm::Instruction::Trunc: {
249 uint32_t BitWidth =
250 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
251
252 GetConstantValue(Op0, Result);
253 Result.IntVal = Result.IntVal.trunc(BitWidth);
254
255 return;
256 }
257 case llvm::Instruction::ZExt: {
258 uint32_t BitWidth =
259 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
260
261 GetConstantValue(Op0, Result);
262 Result.IntVal = Result.IntVal.zext(BitWidth);
263
264 return;
265 }
266 case llvm::Instruction::SExt: {
267 uint32_t BitWidth =
268 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
269
270 GetConstantValue(Op0, Result);
271 Result.IntVal = Result.IntVal.sext(BitWidth);
272
273 return;
274 }
275 case llvm::Instruction::FPTrunc: {
276 // TODO(all): fixme: long double
277 GetConstantValue(Op0, Result);
278 Result.FloatVal = static_cast<float>(Result.DoubleVal);
279 return;
280 }
281 case llvm::Instruction::FPExt: {
282 // TODO(all): fixme: long double
283 GetConstantValue(Op0, Result);
284 Result.DoubleVal = static_cast<double>(Result.FloatVal);
285 return;
286 }
287 case llvm::Instruction::UIToFP: {
288 GetConstantValue(Op0, Result);
289 if (CE->getType()->isFloatTy())
290 Result.FloatVal =
291 static_cast<float>(Result.IntVal.roundToDouble());
292 else if (CE->getType()->isDoubleTy())
293 Result.DoubleVal = Result.IntVal.roundToDouble();
294 else if (CE->getType()->isX86_FP80Ty()) {
295 const uint64_t zero[] = { 0, 0 };
296 llvm::APFloat apf(llvm::APInt(80, 2, zero));
297 apf.convertFromAPInt(Result.IntVal,
298 false,
299 llvm::APFloat::rmNearestTiesToEven);
300 Result.IntVal = apf.bitcastToAPInt();
301 }
302 return;
303 }
304 case llvm::Instruction::SIToFP: {
305 GetConstantValue(Op0, Result);
306 if (CE->getType()->isFloatTy())
307 Result.FloatVal =
308 static_cast<float>(Result.IntVal.signedRoundToDouble());
309 else if (CE->getType()->isDoubleTy())
310 Result.DoubleVal = Result.IntVal.signedRoundToDouble();
311 else if (CE->getType()->isX86_FP80Ty()) {
312 const uint64_t zero[] = { 0, 0 };
313 llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
314 apf.convertFromAPInt(Result.IntVal,
315 true,
316 llvm::APFloat::rmNearestTiesToEven);
317 Result.IntVal = apf.bitcastToAPInt();
318 }
319 return;
320 }
321 // double->APInt conversion handles sign
322 case llvm::Instruction::FPToUI:
323 case llvm::Instruction::FPToSI: {
324 uint32_t BitWidth =
325 llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();
326
327 GetConstantValue(Op0, Result);
328 if (Op0->getType()->isFloatTy())
329 Result.IntVal =
330 llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
331 else if (Op0->getType()->isDoubleTy())
332 Result.IntVal =
333 llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
334 BitWidth);
335 else if (Op0->getType()->isX86_FP80Ty()) {
336 llvm::APFloat apf = llvm::APFloat(Result.IntVal);
337 uint64_t V;
338 bool Ignored;
339 apf.convertToInteger(&V,
340 BitWidth,
341 CE->getOpcode() == llvm::Instruction::FPToSI,
342 llvm::APFloat::rmTowardZero,
343 &Ignored);
344 Result.IntVal = V; // endian?
345 }
346 return;
347 }
348 case llvm::Instruction::PtrToInt: {
349 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
350
351 GetConstantValue(Op0, Result);
352 Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
353 (Result.PointerVal));
354
355 return;
356 }
357 case llvm::Instruction::IntToPtr: {
358 uint32_t PtrWidth = mpTD->getPointerSizeInBits();
359
360 GetConstantValue(Op0, Result);
361 if (PtrWidth != Result.IntVal.getBitWidth())
362 Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
363 assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");
364
365 Result.PointerVal =
366 llvm::PointerTy(
367 static_cast<uintptr_t>(Result.IntVal.getZExtValue()));
368
369 return;
370 }
371 case llvm::Instruction::BitCast: {
372 GetConstantValue(Op0, Result);
373 const llvm::Type *DestTy = CE->getType();
374
375 switch (Op0->getType()->getTypeID()) {
376 case llvm::Type::IntegerTyID: {
377 assert(DestTy->isFloatingPointTy() && "invalid bitcast");
378 if (DestTy->isFloatTy())
379 Result.FloatVal = Result.IntVal.bitsToFloat();
380 else if (DestTy->isDoubleTy())
381 Result.DoubleVal = Result.IntVal.bitsToDouble();
382 break;
383 }
384 case llvm::Type::FloatTyID: {
385 assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
386 Result.IntVal.floatToBits(Result.FloatVal);
387 break;
388 }
389 case llvm::Type::DoubleTyID: {
390 assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
391 Result.IntVal.doubleToBits(Result.DoubleVal);
392 break;
393 }
394 case llvm::Type::PointerTyID: {
395 assert(DestTy->isPointerTy() && "Invalid bitcast");
396 break; // getConstantValue(Op0) above already converted it
397 }
398 default: {
399 llvm_unreachable("Invalid bitcast operand");
400 }
401 }
402 return;
403 }
404 case llvm::Instruction::Add:
405 case llvm::Instruction::FAdd:
406 case llvm::Instruction::Sub:
407 case llvm::Instruction::FSub:
408 case llvm::Instruction::Mul:
409 case llvm::Instruction::FMul:
410 case llvm::Instruction::UDiv:
411 case llvm::Instruction::SDiv:
412 case llvm::Instruction::URem:
413 case llvm::Instruction::SRem:
414 case llvm::Instruction::And:
415 case llvm::Instruction::Or:
416 case llvm::Instruction::Xor: {
417 llvm::GenericValue LHS, RHS;
418 GetConstantValue(Op0, LHS);
419 GetConstantValue(CE->getOperand(1), RHS);
420
421 switch (Op0->getType()->getTypeID()) {
422 case llvm::Type::IntegerTyID: {
423 switch (CE->getOpcode()) {
424 case llvm::Instruction::Add: {
425 Result.IntVal = LHS.IntVal + RHS.IntVal;
426 break;
427 }
428 case llvm::Instruction::Sub: {
429 Result.IntVal = LHS.IntVal - RHS.IntVal;
430 break;
431 }
432 case llvm::Instruction::Mul: {
433 Result.IntVal = LHS.IntVal * RHS.IntVal;
434 break;
435 }
436 case llvm::Instruction::UDiv: {
437 Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
438 break;
439 }
440 case llvm::Instruction::SDiv: {
441 Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
442 break;
443 }
444 case llvm::Instruction::URem: {
445 Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
446 break;
447 }
448 case llvm::Instruction::SRem: {
449 Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
450 break;
451 }
452 case llvm::Instruction::And: {
453 Result.IntVal = LHS.IntVal & RHS.IntVal;
454 break;
455 }
456 case llvm::Instruction::Or: {
457 Result.IntVal = LHS.IntVal | RHS.IntVal;
458 break;
459 }
460 case llvm::Instruction::Xor: {
461 Result.IntVal = LHS.IntVal ^ RHS.IntVal;
462 break;
463 }
464 default: {
465 llvm_unreachable("Invalid integer opcode");
466 }
467 }
468 break;
469 }
470 case llvm::Type::FloatTyID: {
471 switch (CE->getOpcode()) {
472 case llvm::Instruction::FAdd: {
473 Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
474 break;
475 }
476 case llvm::Instruction::FSub: {
477 Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
478 break;
479 }
480 case llvm::Instruction::FMul: {
481 Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
482 break;
483 }
484 case llvm::Instruction::FDiv: {
485 Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
486 break;
487 }
488 case llvm::Instruction::FRem: {
489 Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
490 break;
491 }
492 default: {
493 llvm_unreachable("Invalid float opcode");
494 }
495 }
496 break;
497 }
498 case llvm::Type::DoubleTyID: {
499 switch (CE->getOpcode()) {
500 case llvm::Instruction::FAdd: {
501 Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
502 break;
503 }
504 case llvm::Instruction::FSub: {
505 Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
506 break;
507 }
508 case llvm::Instruction::FMul: {
509 Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
510 break;
511 }
512 case llvm::Instruction::FDiv: {
513 Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
514 break;
515 }
516 case llvm::Instruction::FRem: {
517 Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
518 break;
519 }
520 default: {
521 llvm_unreachable("Invalid double opcode");
522 }
523 }
524 break;
525 }
526 case llvm::Type::X86_FP80TyID:
527 case llvm::Type::PPC_FP128TyID:
528 case llvm::Type::FP128TyID: {
529 llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
530 switch (CE->getOpcode()) {
531 case llvm::Instruction::FAdd: {
532 apfLHS.add(llvm::APFloat(RHS.IntVal),
533 llvm::APFloat::rmNearestTiesToEven);
534 break;
535 }
536 case llvm::Instruction::FSub: {
537 apfLHS.subtract(llvm::APFloat(RHS.IntVal),
538 llvm::APFloat::rmNearestTiesToEven);
539 break;
540 }
541 case llvm::Instruction::FMul: {
542 apfLHS.multiply(llvm::APFloat(RHS.IntVal),
543 llvm::APFloat::rmNearestTiesToEven);
544 break;
545 }
546 case llvm::Instruction::FDiv: {
547 apfLHS.divide(llvm::APFloat(RHS.IntVal),
548 llvm::APFloat::rmNearestTiesToEven);
549 break;
550 }
551 case llvm::Instruction::FRem: {
552 apfLHS.mod(llvm::APFloat(RHS.IntVal),
553 llvm::APFloat::rmNearestTiesToEven);
554 break;
555 }
556 default: {
557 llvm_unreachable("Invalid long double opcode");
558 }
559 }
560 Result.IntVal = apfLHS.bitcastToAPInt();
561 break;
562 }
563 default: {
564 llvm_unreachable("Bad add type!");
565 }
566 } // End switch (Op0->getType()->getTypeID())
567 return;
568 }
569 default: {
570 break;
571 }
572 } // End switch (CE->getOpcode())
573
574 std::string msg;
575 llvm::raw_string_ostream Msg(msg);
576 Msg << "ConstantExpr not handled: " << *CE;
577 llvm::report_fatal_error(Msg.str());
578 } // C->getValueID() == llvm::Value::ConstantExprVal
579
580 switch (C->getType()->getTypeID()) {
581 case llvm::Type::FloatTyID: {
582 Result.FloatVal =
583 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
584 break;
585 }
586 case llvm::Type::DoubleTyID: {
587 Result.DoubleVal =
588 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
589 break;
590 }
591 case llvm::Type::X86_FP80TyID:
592 case llvm::Type::FP128TyID:
593 case llvm::Type::PPC_FP128TyID: {
594 Result.IntVal =
595 llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
596 break;
597 }
598 case llvm::Type::IntegerTyID: {
599 Result.IntVal =
600 llvm::cast<llvm::ConstantInt>(C)->getValue();
601 break;
602 }
603 case llvm::Type::PointerTyID: {
604 switch (C->getValueID()) {
605 case llvm::Value::ConstantPointerNullVal: {
606 Result.PointerVal = NULL;
607 break;
608 }
609 case llvm::Value::FunctionVal: {
610 const llvm::Function *F = static_cast<const llvm::Function*>(C);
611 Result.PointerVal =
612 GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
613 break;
614 }
615 case llvm::Value::GlobalVariableVal: {
616 const llvm::GlobalVariable *GV =
617 static_cast<const llvm::GlobalVariable*>(C);
618 Result.PointerVal =
619 GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
620 break;
621 }
622 case llvm::Value::BlockAddressVal: {
623 assert(false && "JIT does not support address-of-label yet!");
624 }
625 default: {
626 llvm_unreachable("Unknown constant pointer type!");
627 }
628 }
629 break;
630 }
631 default: {
632 std::string msg;
633 llvm::raw_string_ostream Msg(msg);
634 Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
635 llvm::report_fatal_error(Msg.str());
636 break;
637 }
638 }
639 return;
640}
641
642
// Stores the data in @Val of type @Ty at address @Addr.
//
// @Ty determines which GenericValue member is read and how many bytes are
// written (mpTD->getTypeStoreSize).  If the host and target endianness
// differ, the stored bytes are reversed at the end.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        //  Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        // Copy whole 64-bit words from the low end of Src into the high
        // end of the destination, working downward.
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // Final (possibly partial) word: take its most-significant i bytes.
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // x86 long double stores 10 bytes of significant data (raw bits are
      // carried in IntVal).
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      // NOTE(review): unknown type IDs are silently ignored here —
      // presumably unreachable for first-class types; confirm.
      break;
    }
  }

  // Fix up byte order when cross-compiling to a target of the opposite
  // endianness.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}
713
714
715// Recursive function to apply a @Constant value into the specified memory
716// location @Addr.
717void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
718 switch (C->getValueID()) {
719 case llvm::Value::UndefValueVal: {
720 // Nothing to do
721 break;
722 }
723 case llvm::Value::ConstantVectorVal: {
724 // dynamic cast may hurt performance
725 const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;
726
727 unsigned int ElementSize = mpTD->getTypeAllocSize
728 (CP->getType()->getElementType());
729
730 for (int i = 0, e = CP->getNumOperands(); i != e;i++)
731 InitializeConstantToMemory(
732 CP->getOperand(i),
733 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
734 break;
735 }
736 case llvm::Value::ConstantAggregateZeroVal: {
737 memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
738 break;
739 }
740 case llvm::Value::ConstantArrayVal: {
741 const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
742 unsigned int ElementSize = mpTD->getTypeAllocSize
743 (CPA->getType()->getElementType());
744
745 for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
746 InitializeConstantToMemory(
747 CPA->getOperand(i),
748 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
749 break;
750 }
751 case llvm::Value::ConstantStructVal: {
752 const llvm::ConstantStruct *CPS =
753 static_cast<const llvm::ConstantStruct*>(C);
754 const llvm::StructLayout *SL = mpTD->getStructLayout
755 (llvm::cast<llvm::StructType>(CPS->getType()));
756
757 for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
758 InitializeConstantToMemory(
759 CPS->getOperand(i),
760 reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
761 break;
762 }
763 default: {
764 if (C->getType()->isFirstClassType()) {
765 llvm::GenericValue Val;
766 GetConstantValue(C, Val);
767 StoreValueToMemory(Val, Addr, C->getType());
768 } else {
769 llvm_unreachable("Unknown constant type to initialize memory "
770 "with!");
771 }
772 break;
773 }
774 }
775 return;
776}
777
778
// Lays out and initializes the (target-independent) constant pool @MCP in
// memory obtained from the code memory manager, recording each entry's
// final address in mConstPoolAddresses for later relocation resolution.
void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return;  // out of memory

  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    // Round Offset up to this entry's alignment boundary (same layout rule
    // as GetConstantPoolSizeInBytes, so Size is always sufficient).
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    // Target-specific constant pool entries carry no Constant* to
    // initialize from, so they are not supported here.
    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error
          ("Initialize memory with machine specific constant pool"
           " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    const llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}
823
824
825void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
826 if (mpTJI->hasCustomJumpTables())
827 return;
828
829 const std::vector<llvm::MachineJumpTableEntry> &JT =
830 MJTI->getJumpTables();
831 if (JT.empty())
832 return;
833
834 unsigned NumEntries = 0;
835 for (int i = 0, e = JT.size(); i != e; i++)
836 NumEntries += JT[i].MBBs.size();
837
838 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
839
840 mpJumpTable = MJTI;
841 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
842 MJTI->getEntryAlignment(*mpTD));
843
844 return;
845}
846
847
// Fills the jump table memory reserved by initJumpTableInfo() with the
// (now known) absolute addresses of the emitted machine basic blocks.
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  // sizeof(mpTD) is simply "sizeof a pointer": absolute block addresses
  // are stored below, so each slot must be exactly pointer-sized and the
  // relocation model must be static.
  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}
872
873
// Resolves @V (function, global variable, or alias) to a runtime address.
//
// @Reference      is the location of the use (currently unused here).
// @MayNeedFarStub when true, a function is always reached through a stub
//                 so the call site never needs a far branch.
//
// Functions resolve, in order, to: already-emitted code, an existing lazy
// stub, a direct pointer (non-x86_64 only), or a freshly emitted stub.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        //   we're returning the same address for the function as any
        //   previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        //
        // x86_64 architecture may encounter the bug:
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      // Aliases are resolved to their ultimate aliasee and handled by kind.
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          // code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          // Emit the aliased variable on first use, then return its
          // recorded address.
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  llvm_unreachable("Unknown type of global value!");
}
963
964
965// If the specified function has been code-gen'd, return a pointer to the
966// function. If not, compile it, or use a stub to implement lazy compilation
967// if available.
968void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
969 // If we have already code generated the function, just return the
970 // address.
971 if (void *Addr = GetPointerToGlobalIfAvailable(F))
972 return Addr;
973
974 // Get a stub if the target supports it.
975 return GetLazyFunctionStub(F);
976}
977
978
// Returns the lazy stub for @F, creating and caching one on first use.
// External functions are resolved eagerly so the stub can branch straight
// to the real address; unresolved functions are queued in
// PendingFunctions for later fix-up.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    PendingFunctions.insert(F);
  else
    // Stub is final (its target is known): safe to disassemble for dumps.
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}
1013
1014
1015void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
1016 bool AbortOnFailure) {
1017 void *Addr = GetPointerToGlobalIfAvailable(F);
1018 if (Addr)
1019 return Addr;
1020
1021 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
1022 "Internal error: only external defined function routes here!");
1023
1024 // Handle the failure resolution by ourselves.
1025 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
1026 /* AbortOnFailure = */ false);
1027
1028 // If we resolved the symbol to a null address (eg. a weak external)
1029 // return a null pointer let the application handle it.
1030 if (Addr == NULL) {
1031 if (AbortOnFailure)
1032 llvm::report_fatal_error("Could not resolve external function "
1033 "address: " + F->getName());
1034 else
1035 return NULL;
1036 }
1037
1038 AddGlobalMapping(F, Addr);
1039
1040 return Addr;
1041}
1042
1043
1044void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
1045 bool AbortOnFailure) {
1046 if (void *Addr = FindRuntimeFunction(Name.c_str()))
1047 return Addr;
1048
1049 if (mpSymbolLookupFn)
1050 if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1051 return Addr;
1052
1053 if (AbortOnFailure)
1054 llvm::report_fatal_error("Program used external symbol '" + Name +
1055 "' which could not be resolved!");
1056
1057 return NULL;
1058}
1059
1060
1061// Return the address of the specified global variable, possibly emitting it
1062// to memory if needed. This is used by the Emitter.
1063void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1064 void *Ptr = GetPointerToGlobalIfAvailable(GV);
1065 if (Ptr)
1066 return Ptr;
1067
1068 if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1069 // If the global is external, just remember the address.
1070 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1071 AddGlobalMapping(GV, Ptr);
1072 } else {
1073 // If the global hasn't been emitted to memory yet, allocate space and
1074 // emit it into memory.
1075 Ptr = GetMemoryForGV(GV);
1076 AddGlobalMapping(GV, Ptr);
1077 EmitGlobalVariable(GV);
1078 }
1079
1080 return Ptr;
1081}
1082
1083
// This method abstracts memory allocation of global variable so that the
// JIT can allocate thread local variables depending on the target.
//
// Returns storage of the global's alloc-size, aligned to its preferred
// alignment (TLS globals are rejected with a fatal error).
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  // A GlobalVariable's own type is a pointer; size/alignment come from
  // the pointee (element) type.
  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We can support TLS by
    //
    // Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to.
    // (should we disable this in the front-end (i.e., slang)?).
    llvm::report_fatal_error
        ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      // Assumes malloc returns storage aligned to at least 8 bytes —
      // TODO(review): confirm for every targeted platform allocator.
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      // NOTE(review): the unaligned base pointer is discarded below, so
      // this allocation can never be free()d; tolerable only if these
      // globals are meant to live for the lifetime of the process.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
                (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    // Otherwise the memory-manager-backed global allocator handles
    // alignment itself.
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}
1120
1121
1122void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1123 void *GA = GetPointerToGlobalIfAvailable(GV);
1124
1125 if (GV->isThreadLocal())
1126 llvm::report_fatal_error
1127 ("We don't support Thread Local Storage (TLS)!");
1128
1129 if (GA == NULL) {
1130 // If it's not already specified, allocate memory for the global.
1131 GA = GetMemoryForGV(GV);
1132 AddGlobalMapping(GV, GA);
1133 }
1134
1135 InitializeConstantToMemory(GV->getInitializer(), GA);
1136
1137 // You can do some statistics on global variable here.
1138 return;
1139}
1140
1141
1142void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1143 // Make sure GV is emitted first, and create a stub containing the fully
1144 // resolved address.
1145 void *GVAddress = GetPointerToGlobal(V, Reference, false);
1146
1147 // If we already have a stub for this global variable, recycle it.
1148 void *&IndirectSym = GlobalToIndirectSymMap[V];
1149 // Otherwise, codegen a new indirect symbol.
1150 if (!IndirectSym)
1151 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1152
1153 return IndirectSym;
1154}
1155
1156
1157// Return a stub for the function at the specified address.
1158void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
1159 void *&Stub = ExternalFnToStubMap[FnAddr];
1160 if (Stub)
1161 return Stub;
1162
1163 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1164 startGVStub(0, SL.Size, SL.Alignment);
1165 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1166 finishGVStub();
1167
1168 return Stub;
1169}
1170
1171
1172#if defined(USE_DISASSEMBLER)
// Disassemble the machine code in [Start, Start + Length) and print it,
// labelled with @Name (@IsStub marks stub bodies). Output goes to an
// append-mode file on the device when USE_DISASSEMBLER_FILE is defined,
// otherwise to stdout.
void CodeEmitter::Disassemble(const llvm::StringRef &Name,
                              uint8_t *Start, size_t Length, bool IsStub) {
  llvm::raw_ostream *OS;

#if defined(USE_DISASSEMBLER_FILE)
  // Heap-allocated stream, appended to a fixed path; deleted below.
  std::string ErrorInfo;
  OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
                                ErrorInfo,
                                llvm::raw_fd_ostream::F_Append);
  if (!ErrorInfo.empty()) {    // some errors occurred
    // LOGE("Error in creating disassembly file");
    delete OS;
    return;
  }
#else
  OS = &llvm::outs();
#endif

  *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
      << "\n";

  // Lazily create the target-specific disassembly helpers; they are
  // cached in members and reused across calls.
  if (mpAsmInfo == NULL)
    mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
  if (mpDisassmbler == NULL)
    mpDisassmbler = mpTarget->createMCDisassembler();
  if (mpIP == NULL)
    mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
                                         *mpAsmInfo);

  // Wrap the raw byte range in a MemoryObject for the disassembler.
  const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
                                                                Length);
  uint64_t Size;
  uint64_t Index;

  // Decode one instruction per iteration; Size is set by the
  // disassembler to the decoded instruction's byte length.
  for (Index = 0; Index < Length; Index += Size) {
    llvm::MCInst Inst;

    if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
                                      /* REMOVED */ llvm::nulls())) {
      // NOTE(review): the (uint32_t) cast truncates the pointer on a
      // 64-bit host — printed addresses are only correct on 32-bit
      // targets; confirm this is the intended platform.
      (*OS).indent(4)
           .write("0x", 2)
           .write_hex((uint32_t) Start + Index)
           .write(':');
      mpIP->printInst(&Inst, *OS);
      *OS << "\n";
    } else {
      if (Size == 0)
        Size = 1;  // skip illegible bytes
    }
  }

  *OS << "\n";
  delete BufferMObj;

#if defined(USE_DISASSEMBLER_FILE)
  // Flush and release the file-backed stream created above.
  OS->close();
  delete OS;
#endif

  return;
}
1235#endif // defined(USE_DISASSEMBLER)
1236
1237
1238void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
1239 // Set Target
1240 mpTarget = &TM.getTarget();
1241 // Set TargetJITInfo
1242 mpTJI = TM.getJITInfo();
1243 // set TargetData
1244 mpTD = TM.getTargetData();
1245
1246 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
1247
1248 return;
1249}
1250
1251
// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // Code memory must be writable while we emit into it; it is flipped
  // back to executable in finishFunction().
  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Lazily create the per-function bookkeeping record and note where the
  // whole body (constant pool + jump tables + code) starts.
  if (mpCurEmitFunction == NULL)
    mpCurEmitFunction = new EmittedFunctionCode();
  mpCurEmitFunction->FunctionBody = BufferBegin;

  // Align to 16 bytes before emitting the constant pool/jump table info.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function; align to
  // the function's own alignment, but at least 8 bytes.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  // Record the (post-alignment) code start as the function's address.
  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->Code = CurBufferPtr;

  // Basic-block addresses are per-function; reset for the new function.
  mMBBLocations.clear();
}
1296
1297
// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
//
// NOTE(review): despite the comment above, every path below returns
// false — including the overflow paths. Confirm whether the caller is
// expected to retry, or whether overflow is simply fatal here.
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // Buffer overflow: emission ran out of memory.
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  // Emit the jump table entries now that all block addresses are known.
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    // Offset of this function's buffer within the whole code region,
    // used to record cache-relative relocation offsets.
    ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          // External symbol: resolve (fatal on failure), optionally
          // routing through a far stub.
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
             (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.

          // Cache the relocation result address
          mCachingRelocations.push_back(
              oBCCRelocEntry(MR.getRelocationType(),
                             MR.getMachineCodeOffset() + BufferOffset,
                             ResultPtr));
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Let the target apply all resolved relocations to the code buffer.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  // Hand the bookkeeping record over to the emitted-functions table
  // (keyed by name); an anonymous function's record is simply dropped.
  if (F.getFunction()->hasName())
    mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
  mpCurEmitFunction = NULL;

  // Per-function scratch state is cleared for the next function.
  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // Patch this function's lazy stub (if any) to point at the real code.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}
1407
1408
1409void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
1410 unsigned Alignment) {
1411 mpSavedBufferBegin = BufferBegin;
1412 mpSavedBufferEnd = BufferEnd;
1413 mpSavedCurBufferPtr = CurBufferPtr;
1414
1415 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
1416 Alignment);
1417 BufferEnd = BufferBegin + StubSize + 1;
1418
1419 return;
1420}
1421
1422
1423void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1424 mpSavedBufferBegin = BufferBegin;
1425 mpSavedBufferEnd = BufferEnd;
1426 mpSavedCurBufferPtr = CurBufferPtr;
1427
1428 BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
1429 BufferEnd = BufferBegin + StubSize + 1;
1430
1431 return;
1432}
1433
1434
1435void CodeEmitter::finishGVStub() {
1436 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1437
1438 // restore
1439 BufferBegin = mpSavedBufferBegin;
1440 BufferEnd = mpSavedBufferEnd;
1441 CurBufferPtr = mpSavedCurBufferPtr;
1442}
1443
1444
1445// Allocates and fills storage for an indirect GlobalValue, and returns the
1446// address.
1447void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
1448 const uint8_t *Buffer, size_t Size,
1449 unsigned Alignment) {
1450 uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
1451 memcpy(IndGV, Buffer, Size);
1452 return IndGV;
1453}
1454
1455
1456// Allocate memory for a global. Unlike allocateSpace, this method does not
1457// allocate memory in the current output buffer, because a global may live
1458// longer than the current function.
1459void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1460 // Delegate this call through the memory manager.
1461 return mpMemMgr->allocateGlobal(Size, Alignment);
1462}
1463
1464
1465// This should be called by the target when a new basic block is about to be
1466// emitted. This way the MCE knows where the start of the block is, and can
1467// implement getMachineBasicBlockAddress.
1468void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
1469 if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
1470 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
1471 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
1472 return;
1473}
1474
1475
1476// Return the address of the jump table with index @Index in the function
1477// that last called initJumpTableInfo.
1478uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
1479 const std::vector<llvm::MachineJumpTableEntry> &JT =
1480 mpJumpTable->getJumpTables();
1481
1482 assert((Index < JT.size()) && "Invalid jump table index!");
1483
1484 unsigned int Offset = 0;
1485 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
1486
1487 for (unsigned i = 0; i < Index; i++)
1488 Offset += JT[i].MBBs.size();
1489 Offset *= EntrySize;
1490
1491 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
1492}
1493
1494
1495// Return the address of the specified MachineBasicBlock, only usable after
1496// the label for the MBB has been emitted.
1497uintptr_t CodeEmitter::getMachineBasicBlockAddress(
1498 llvm::MachineBasicBlock *MBB) const {
1499 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
1500 mMBBLocations[MBB->getNumber()] &&
1501 "MBB not emitted!");
1502 return mMBBLocations[MBB->getNumber()];
1503}
1504
1505
1506void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
1507 // Get the empty stub we generated earlier.
1508 void *Stub;
1509 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
1510 if (I != PendingFunctions.end())
1511 Stub = mFunctionToLazyStubMap[F];
1512 else
1513 return;
1514
1515 void *Addr = GetPointerToGlobalIfAvailable(F);
1516
1517 assert(Addr != Stub &&
1518 "Function must have non-stub address to be updated.");
1519
1520 // Tell the target jit info to rewrite the stub at the specified address,
1521 // rather than creating a new one.
1522 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1523 startGVStub(Stub, SL.Size);
1524 mpTJI->emitFunctionStub(F, Addr, *this);
1525 finishGVStub();
1526
1527 Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
1528 SL.Size, true);
1529
1530 PendingFunctions.erase(I);
1531}
1532
1533
1534void CodeEmitter::getFunctionNames(BCCsizei *actualFunctionCount,
1535 BCCsizei maxFunctionCount,
1536 BCCchar **functions) {
1537 int functionCount = mEmittedFunctions.size();
1538
1539 if (actualFunctionCount)
1540 *actualFunctionCount = functionCount;
1541 if (functionCount > maxFunctionCount)
1542 functionCount = maxFunctionCount;
1543 if (functions)
1544 for (EmittedFunctionsMapTy::const_iterator
1545 I = mEmittedFunctions.begin(), E = mEmittedFunctions.end();
1546 I != E && (functionCount > 0); I++, functionCount--) {
1547 *functions++ = const_cast<BCCchar*>(I->first.c_str());
1548 }
1549}
1550
1551
1552void CodeEmitter::getFunctionBinary(BCCchar *label,
1553 BCCvoid **base,
1554 BCCsizei *length) {
1555 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
1556 if (I == mEmittedFunctions.end()) {
1557 *base = NULL;
1558 *length = 0;
1559 } else {
1560 *base = I->second->Code;
1561 *length = I->second->Size;
1562 }
1563}
1564
1565} // namespace bcc