/*
 * Copyright 2010, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bcc_code_emitter.h"

#include "bcc_buff_mem_object.h"
#include "bcc_code_mem_manager.h"
#include "bcc_emitted_func_code.h"

#include <bcc/bcc.h>
#include <bcc/bcc_cache.h>

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/JITCodeEmitter.h"

#include "llvm/ExecutionEngine/GenericValue.h"

#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

#include "llvm/System/Host.h"

#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetJITInfo.h"

#include "llvm/Constant.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalValue.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"

#include <algorithm>
#include <vector>
#include <set>
#include <string>

#include <stddef.h>


namespace bcc {

78CodeEmitter::CodeEmitter(CodeMemoryManager *pMemMgr)
79 : mpMemMgr(pMemMgr),
80 mpTarget(NULL),
81 mpTJI(NULL),
82 mpTD(NULL),
83 mpCurEmitFunction(NULL),
84 mpConstantPool(NULL),
85 mpJumpTable(NULL),
86 mpMMI(NULL),
87#if defined(USE_DISASSEMBLER)
88 mpAsmInfo(NULL),
89 mpDisassmbler(NULL),
90 mpIP(NULL),
91#endif
92 mpSymbolLookupFn(NULL),
93 mpSymbolLookupContext(NULL) {
94}
95
96
97CodeEmitter::~CodeEmitter() {
98 delete mpMemMgr;
99#if defined(USE_DISASSEMBLER)
100 delete mpAsmInfo;
101 delete mpDisassmbler;
102 delete mpIP;
103#endif
104}
105
106
107// Once you finish the compilation on a translation unit, you can call this
108// function to recycle the memory (which is used at compilation time and not
109// needed for runtime).
110//
111// NOTE: You should not call this funtion until the code-gen passes for a
112// given module is done. Otherwise, the results is undefined and may
113// cause the system crash!
114void CodeEmitter::releaseUnnecessary() {
115 mMBBLocations.clear();
116 mLabelLocations.clear();
117 mGlobalAddressMap.clear();
118 mFunctionToLazyStubMap.clear();
119 GlobalToIndirectSymMap.clear();
120 ExternalFnToStubMap.clear();
121 PendingFunctions.clear();
122}
123
124
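// Returns this emitter to its initial state: releases the compile-time
// maps, deletes every recorded EmittedFunctionCode, clears the symbol
// lookup callback, and resets the underlying code memory manager.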
void CodeEmitter::reset() {
  releaseUnnecessary();

  mpSymbolLookupFn = NULL;
  mpSymbolLookupContext = NULL;

  mpTJI = NULL;
  mpTD = NULL;

  for (EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin(),
       E = mEmittedFunctions.end();
       I != E;
       I++)
    if (I->second != NULL)
      delete I->second;
  mEmittedFunctions.clear();

  mpMemMgr->reset();
}


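// Replaces the address mapped for @GV with @Addr and returns the previous
// mapping (NULL if there was none). Passing @Addr == NULL removes the
// existing mapping instead.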
void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV,
                                       void *Addr) {
  if (Addr == NULL) {
    // Remove the mapping.
    GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
    void *OldVal;

    if (I == mGlobalAddressMap.end()) {
      OldVal = NULL;
    } else {
      OldVal = I->second;
      mGlobalAddressMap.erase(I);
    }

    return OldVal;
  }

  void *&CurVal = mGlobalAddressMap[GV];
  void *OldVal = CurVal;

  CurVal = Addr;

  return OldVal;
}


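// Computes the total byte size of a machine constant pool: each entry is
// first padded up to its own alignment (the AlignMask rounding below) and
// then contributes its type's allocation size. Illustrative example
// (assumed sizes): a 4-byte entry followed by an 8-byte entry with 8-byte
// alignment yields 4 + 4 (padding) + 8 = 16 bytes.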
unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
    llvm::MachineConstantPool *MCP) {
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return 0;

  unsigned int Size = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    unsigned int AlignMask = CPE.getAlignment() - 1;
    Size = (Size + AlignMask) & ~AlignMask;
    const llvm::Type *Ty = CPE.getType();
    Size += mpTD->getTypeAllocSize(Ty);
  }

  return Size;
}

// This function converts a Constant* into a GenericValue. The interesting
// part is when C is a ConstantExpr.
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the index
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
                                                 &Indices[0],
                                                 Indices.size());

        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V;  // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal = llvm::APInt(PtrWidth,
                                    uintptr_t(Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            assert(DestTy->isFloatingPointTy() && "Invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            assert(DestTy->isPointerTy() && "Invalid bitcast");
            break;  // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        }  // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    }  // End switch (CE->getOpcode())

    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  }  // C->getValueID() == llvm::Value::ConstantExprVal

  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal = llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          assert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}


// Stores the data in @Val of type @Ty at address @Addr.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        // Reverse the word order, but not the bytes in a word.
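        //
        // For example (an assumed 12-byte store of a 96-bit value whose
        // raw words are [W0 = LSW, W1 = MSW]): the loop below copies all
        // 8 bytes of W0 to Addr + 4, and the final memcpy then copies
        // the 4 significant bytes of W1 (which big-endian word layout
        // places at the end of that word) to Addr + 0.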
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      break;
    }
  }

  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}


// Recursively stores the constant value @C into the specified memory
// location @Addr.
void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C,
                                             void *Addr) {
  switch (C->getValueID()) {
    case llvm::Value::UndefValueVal: {
      // Nothing to do
      break;
    }
    case llvm::Value::ConstantVectorVal: {
      // dynamic cast may hurt performance
      const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;

      unsigned int ElementSize =
          mpTD->getTypeAllocSize(CP->getType()->getElementType());

      for (int i = 0, e = CP->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CP->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantAggregateZeroVal: {
      memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
      break;
    }
    case llvm::Value::ConstantArrayVal: {
      const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
      unsigned int ElementSize =
          mpTD->getTypeAllocSize(CPA->getType()->getElementType());

      for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPA->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantStructVal: {
      const llvm::ConstantStruct *CPS =
          static_cast<const llvm::ConstantStruct*>(C);
      const llvm::StructLayout *SL = mpTD->getStructLayout(
          llvm::cast<llvm::StructType>(CPS->getType()));

      for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPS->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
      break;
    }
    default: {
      if (C->getType()->isFirstClassType()) {
        llvm::GenericValue Val;
        GetConstantValue(C, Val);
        StoreValueToMemory(Val, Addr, C->getType());
      } else {
        llvm_unreachable("Unknown constant type to initialize memory "
                         "with!");
      }
      break;
    }
  }
  return;
}


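// Allocates memory for the function's constant pool and writes every
// constant into it, honoring each entry's alignment. On ARM this is a
// no-op: the target resolves constant pool addresses itself (see the
// hasCustomConstantPool() check below).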
void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return;  // out of memory

  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error(
          "Initializing memory with a machine-specific constant pool"
          " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    const llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}


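// Reserves (but does not fill) space for the function's jump tables. The
// entries are written later by emitJumpTableInfo(), once the addresses of
// the emitted machine basic blocks are known.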
void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty())
    return;

  unsigned NumEntries = 0;
  for (int i = 0, e = JT.size(); i != e; i++)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize(*mpTD);

  mpJumpTable = MJTI;
  mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
                                  MJTI->getEntryAlignment(*mpTD));

  return;
}


void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'.
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}


void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created. We do this
        // first so that we return the same address for the function as
        // any previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        // The x86_64 architecture may encounter the bug:
        //
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        //
        // which generates a "call" instruction instead of "callq". Once
        // the real address of the stub no longer fits in 32 bits, the
        // replacement truncates it, resulting in a serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        //
        // AbortOnFailure is false to allow later calls to
        // mpTJI->relocate() to proceed without aborting, for caching
        // purposes.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */ false);
        }
#endif
      }

      // Otherwise, we may need to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): Is there any possibility that the function is not
          //            code-gen'd?
          //
          // AbortOnFailure is false to allow later calls to
          // mpTJI->relocate() to proceed without aborting, for caching
          // purposes.
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */ false);
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  llvm_unreachable("Unknown type of global value!");
}


// If the specified function has been code-gen'd, return a pointer to the
// function. If not, compile it, or use a stub to implement lazy compilation
// if available.
void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
  // If we have already code generated the function, just return the
  // address.
  if (void *Addr = GetPointerToGlobalIfAvailable(F))
    return Addr;

  // Get a stub if the target supports it.
  return GetLazyFunctionStub(F);
}


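// Returns a lazy stub for @F, creating one through the target's JIT info
// if it does not exist yet. External functions are resolved eagerly, so
// their stubs call the real address; stubs for not-yet-emitted functions
// are recorded in PendingFunctions and rewritten later by
// updateFunctionStub().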
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any case, we should NOT resolve the function at runtime (though we
  // are able to). We resolve it right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    // AbortOnFailure is false to allow later calls to mpTJI->relocate()
    // to proceed without aborting, for caching purposes.
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */ false);
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}


void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
                                        bool AbortOnFailure) {
  void *Addr = GetPointerToGlobalIfAvailable(F);
  if (Addr)
    return Addr;

  assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
         "Internal error: only externally defined functions route here!");

  // Handle the failure resolution by ourselves.
  Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
                                 /* AbortOnFailure = */ false);

  // If we resolved the symbol to a null address (e.g., a weak external),
  // return a null pointer and let the application handle it.
  if (Addr == NULL) {
    if (AbortOnFailure)
      llvm::report_fatal_error("Could not resolve external function "
                               "address: " + F->getName());
    else
      return NULL;
  }

  AddGlobalMapping(F, Addr);

  return Addr;
}


void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
                                           bool AbortOnFailure) {
  if (void *Addr = FindRuntimeFunction(Name.c_str()))
    return Addr;

  if (mpSymbolLookupFn)
    if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
      return Addr;

  if (AbortOnFailure)
    llvm::report_fatal_error("Program used external symbol '" + Name +
                             "' which could not be resolved!");

  return NULL;
}


// Return the address of the specified global variable, possibly emitting it
// to memory if needed. This is used by the Emitter.
void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
  void *Ptr = GetPointerToGlobalIfAvailable(GV);
  if (Ptr)
    return Ptr;

  if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
    // If the global is external, just remember the address.
    Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
    AddGlobalMapping(GV, Ptr);
  } else {
    // If the global hasn't been emitted to memory yet, allocate space and
    // emit it into memory.
    Ptr = GetMemoryForGV(GV);
    AddGlobalMapping(GV, Ptr);
    EmitGlobalVariable(GV);
  }

  return Ptr;
}


// This method abstracts memory allocation for global variables so that the
// JIT can allocate thread local variables depending on the target.
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We could support TLS by:
    //
    //   Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to. (Should we disable this in the front-end,
    // i.e., slang, instead?)
    llvm::report_fatal_error(
        "Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
            (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}


void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
  void *GA = GetPointerToGlobalIfAvailable(GV);

  if (GV->isThreadLocal())
    llvm::report_fatal_error(
        "We don't support Thread Local Storage (TLS)!");

  if (GA == NULL) {
    // If it's not already specified, allocate memory for the global.
    GA = GetMemoryForGV(GV);
    AddGlobalMapping(GV, GA);
  }

  InitializeConstantToMemory(GV->getInitializer(), GA);

  // One could gather statistics on global variables here.
  return;
}


void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V,
                                             void *Reference) {
  // Make sure GV is emitted first, and create a stub containing the fully
  // resolved address.
  void *GVAddress = GetPointerToGlobal(V, Reference, false);

  // If we already have a stub for this global variable, recycle it.
  void *&IndirectSym = GlobalToIndirectSymMap[V];
  // Otherwise, codegen a new indirect symbol.
  if (!IndirectSym)
    IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);

  return IndirectSym;
}


// Return a stub for the function at the specified address.
void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub)
    return Stub;

  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(0, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
  finishGVStub();

  return Stub;
}


#if defined(USE_DISASSEMBLER)
void CodeEmitter::Disassemble(const llvm::StringRef &Name,
                              uint8_t *Start, size_t Length, bool IsStub) {
  llvm::raw_ostream *OS;

#if defined(USE_DISASSEMBLER_FILE)
  std::string ErrorInfo;
  OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
                                ErrorInfo,
                                llvm::raw_fd_ostream::F_Append);
  if (!ErrorInfo.empty()) {  // an error occurred
    // LOGE("Error in creating disassembly file");
    delete OS;
    return;
  }
#else
  OS = &llvm::outs();
#endif

  *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
      << "\n";

  if (mpAsmInfo == NULL)
    mpAsmInfo = mpTarget->createAsmInfo(Triple);
  if (mpDisassmbler == NULL)
    mpDisassmbler = mpTarget->createMCDisassembler();
  if (mpIP == NULL)
    mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
                                         *mpAsmInfo);

  const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
                                                                Length);
  uint64_t Size;
  uint64_t Index;

  for (Index = 0; Index < Length; Index += Size) {
    llvm::MCInst Inst;

    if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
                                      /* REMOVED */ llvm::nulls())) {
      (*OS).indent(4)
           .write("0x", 2)
           .write_hex((uint32_t) Start + Index)
           .write(':');
      mpIP->printInst(&Inst, *OS);
      *OS << "\n";
    } else {
      if (Size == 0)
        Size = 1;  // skip illegible bytes
    }
  }

  *OS << "\n";
  delete BufferMObj;

#if defined(USE_DISASSEMBLER_FILE)
  // Close and release the file stream the disassembly was written to.
  OS->close();
  delete OS;
#endif

  return;
}
#endif  // defined(USE_DISASSEMBLER)


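// Caches the Target, TargetJITInfo, and TargetData of @TM; these are used
// throughout code emission.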
void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
  // Set Target
  mpTarget = &TM.getTarget();
  // Set TargetJITInfo
  mpTJI = TM.getJITInfo();
  // Set TargetData
  mpTD = TM.getTargetData();

  assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");

  return;
}


// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guaranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  if (mpCurEmitFunction == NULL)
    mpCurEmitFunction = new EmittedFunctionCode();
  mpCurEmitFunction->FunctionBody = BufferBegin;

  // Ensure the constant pool/jump table info is at least 16-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->Code = CurBufferPtr;

  mMBBLocations.clear();
}


// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // Not enough memory.
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entries.
          //              Currently, we are not caching them. But since
          //              the Android system is using prelink, it is not
          //              a problem.

          // Cache the relocation result address.
          mCachingRelocations.push_back(
              oBCCRelocEntry(MR.getRelocationType(),
                             MR.getMachineCodeOffset() + BufferOffset,
                             ResultPtr));
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // We have now succeeded in emitting the function.
  mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  if (F.getFunction()->hasName())
    mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
  mpCurEmitFunction = NULL;

  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  updateFunctionStub(F.getFunction());

  // Mark the code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}


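// Temporarily redirects the emitter's output buffer to freshly allocated
// stub memory, saving the current BufferBegin/BufferEnd/CurBufferPtr so
// that finishGVStub() can restore them afterwards.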
void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
                              unsigned Alignment) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
                                                      Alignment);
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


void CodeEmitter::finishGVStub() {
  assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");

  // restore
  BufferBegin = mpSavedBufferBegin;
  BufferEnd = mpSavedBufferEnd;
  CurBufferPtr = mpSavedCurBufferPtr;
}


// Allocates and fills storage for an indirect GlobalValue, and returns the
// address.
void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
                                   const uint8_t *Buffer, size_t Size,
                                   unsigned Alignment) {
  uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
  memcpy(IndGV, Buffer, Size);
  return IndGV;
}


// Allocate memory for a global. Unlike allocateSpace, this method does not
// allocate memory in the current output buffer, because a global may live
// longer than the current function.
void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
  // Delegate this call through the memory manager.
  return mpMemMgr->allocateGlobal(Size, Alignment);
}


// This should be called by the target when a new basic block is about to be
// emitted. This way the MCE knows where the start of the block is, and can
// implement getMachineBasicBlockAddress.
void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
  if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
    mMBBLocations.resize((MBB->getNumber() + 1) * 2);
  mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
  return;
}


// Return the address of the jump table with index @Index in the function
// that last called initJumpTableInfo.
uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
  const std::vector<llvm::MachineJumpTableEntry> &JT =
      mpJumpTable->getJumpTables();

  assert((Index < JT.size()) && "Invalid jump table index!");

  unsigned int Offset = 0;
  unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);

  for (unsigned i = 0; i < Index; i++)
    Offset += JT[i].MBBs.size();
  Offset *= EntrySize;

  return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
}


// Return the address of the specified MachineBasicBlock, only usable after
// the label for the MBB has been emitted.
uintptr_t CodeEmitter::getMachineBasicBlockAddress(
    llvm::MachineBasicBlock *MBB) const {
  assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
         mMBBLocations[MBB->getNumber()] &&
         "MBB not emitted!");
  return mMBBLocations[MBB->getNumber()];
}


void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
  // Get the empty stub we generated earlier.
  void *Stub;
  std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
  if (I != PendingFunctions.end())
    Stub = mFunctionToLazyStubMap[F];
  else
    return;

  void *Addr = GetPointerToGlobalIfAvailable(F);

  assert(Addr != Stub &&
         "Function must have non-stub address to be updated.");

  // Tell the target jit info to rewrite the stub at the specified address,
  // rather than creating a new one.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(Stub, SL.Size);
  mpTJI->emitFunctionStub(F, Addr, *this);
  finishGVStub();

  Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
              SL.Size, true);

  PendingFunctions.erase(I);
}


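// Copies the names of emitted functions into @functions (at most
// @maxFunctionCount of them) and reports the total emitted count through
// @actualFunctionCount. A typical caller queries the count first by
// passing functions == NULL, allocates the array, then calls again to
// fetch the names.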
void CodeEmitter::getFunctionNames(BCCsizei *actualFunctionCount,
                                   BCCsizei maxFunctionCount,
                                   BCCchar **functions) {
  int functionCount = mEmittedFunctions.size();

  if (actualFunctionCount)
    *actualFunctionCount = functionCount;
  if (functionCount > maxFunctionCount)
    functionCount = maxFunctionCount;
  if (functions)
    for (EmittedFunctionsMapTy::const_iterator
         I = mEmittedFunctions.begin(), E = mEmittedFunctions.end();
         I != E && (functionCount > 0); I++, functionCount--) {
      *functions++ = const_cast<BCCchar*>(I->first.c_str());
    }
}


void CodeEmitter::getFunctionBinary(BCCchar *label,
                                    BCCvoid **base,
                                    BCCsizei *length) {
  EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
  if (I == mEmittedFunctions.end()) {
    *base = NULL;
    *length = 0;
  } else {
    *base = I->second->Code;
    *length = I->second->Size;
  }
}

} // namespace bcc