blob: 9021990c8b9ef8b24e0d388533a1fedc3b682a95 [file] [log] [blame]
Logan28325bf2010-11-26 23:27:41 +08001/*
2 * Copyright 2010, The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "bcc_code_emitter.h"
18
19#include "bcc_buff_mem_object.h"
20#include "bcc_code_mem_manager.h"
21#include "bcc_emitted_func_code.h"
Logan4eea1192010-11-26 23:31:57 +080022#include "bcc_runtime.h"
Logan28325bf2010-11-26 23:27:41 +080023
24#include <bcc/bcc.h>
25#include <bcc/bcc_cache.h>
26
27#include "llvm/ADT/APFloat.h"
28#include "llvm/ADT/APInt.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/ADT/StringRef.h"
32
33#include "llvm/CodeGen/MachineBasicBlock.h"
34#include "llvm/CodeGen/MachineConstantPool.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineModuleInfo.h"
37#include "llvm/CodeGen/MachineRelocation.h"
38#include "llvm/CodeGen/MachineJumpTableInfo.h"
39#include "llvm/CodeGen/JITCodeEmitter.h"
40
41#include "llvm/ExecutionEngine/GenericValue.h"
42
43#include "llvm/MC/MCAsmInfo.h"
44#include "llvm/MC/MCDisassembler.h"
45#include "llvm/MC/MCInst.h"
46#include "llvm/MC/MCInstPrinter.h"
47
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/raw_ostream.h"
50
51#include "llvm/System/Host.h"
52
53#include "llvm/Target/TargetData.h"
54#include "llvm/Target/TargetMachine.h"
55#include "llvm/Target/TargetRegistry.h"
56#include "llvm/Target/TargetJITInfo.h"
57
58#include "llvm/Constant.h"
59#include "llvm/Constants.h"
60#include "llvm/DerivedTypes.h"
61#include "llvm/Function.h"
62#include "llvm/GlobalAlias.h"
63#include "llvm/GlobalValue.h"
64#include "llvm/GlobalVariable.h"
65#include "llvm/Instruction.h"
66#include "llvm/Type.h"
67
68#include <algorithm>
69#include <vector>
70#include <set>
71#include <string>
72
73#include <stddef.h>
74
75
76namespace bcc {
77
// Will take the ownership of @MemMgr
//
// Constructs a CodeEmitter bound to the given code memory manager.  All
// other collaborators (target, TargetJITInfo, TargetData, disassembler
// support, symbol-lookup callback) start out NULL and are wired up later,
// before code emission begins.
CodeEmitter::CodeEmitter(CodeMemoryManager *pMemMgr)
  : mpMemMgr(pMemMgr),
    mpTarget(NULL),
    mpTJI(NULL),
    mpTD(NULL),
    mpCurEmitFunction(NULL),
    mpConstantPool(NULL),
    mpJumpTable(NULL),
    mpMMI(NULL),
// Disassembler members only exist in builds with USE_DISASSEMBLER defined.
#if defined(USE_DISASSEMBLER)
    mpAsmInfo(NULL),
    mpDisassmbler(NULL),
    mpIP(NULL),
#endif
    mpSymbolLookupFn(NULL),
    mpSymbolLookupContext(NULL) {
}
96
97
98CodeEmitter::~CodeEmitter() {
99 delete mpMemMgr;
100#if defined(USE_DISASSEMBLER)
101 delete mpAsmInfo;
102 delete mpDisassmbler;
103 delete mpIP;
104#endif
105}
106
107
108// Once you finish the compilation on a translation unit, you can call this
109// function to recycle the memory (which is used at compilation time and not
110// needed for runtime).
111//
112// NOTE: You should not call this funtion until the code-gen passes for a
113// given module is done. Otherwise, the results is undefined and may
114// cause the system crash!
115void CodeEmitter::releaseUnnecessary() {
116 mMBBLocations.clear();
117 mLabelLocations.clear();
118 mGlobalAddressMap.clear();
119 mFunctionToLazyStubMap.clear();
120 GlobalToIndirectSymMap.clear();
121 ExternalFnToStubMap.clear();
122 PendingFunctions.clear();
123}
124
125
126void CodeEmitter::reset() {
127 releaseUnnecessary();
128
129 mpSymbolLookupFn = NULL;
130 mpSymbolLookupContext = NULL;
131
132 mpTJI = NULL;
133 mpTD = NULL;
134
135 for (EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin(),
136 E = mEmittedFunctions.end();
137 I != E;
138 I++)
139 if (I->second != NULL)
140 delete I->second;
141 mEmittedFunctions.clear();
142
143 mpMemMgr->reset();
144}
145
146
147void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
148 if (Addr == NULL) {
149 // Removing mapping
150 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
151 void *OldVal;
152
153 if (I == mGlobalAddressMap.end()) {
154 OldVal = NULL;
155 } else {
156 OldVal = I->second;
157 mGlobalAddressMap.erase(I);
158 }
159
160 return OldVal;
161 }
162
163 void *&CurVal = mGlobalAddressMap[GV];
164 void *OldVal = CurVal;
165
166 CurVal = Addr;
167
168 return OldVal;
169}
170
171
172unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
173 llvm::MachineConstantPool *MCP) {
174 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
175 MCP->getConstants();
176
177 if (Constants.empty())
178 return 0;
179
180 unsigned int Size = 0;
181 for (int i = 0, e = Constants.size(); i != e; i++) {
182 llvm::MachineConstantPoolEntry CPE = Constants[i];
183 unsigned int AlignMask = CPE.getAlignment() - 1;
184 Size = (Size + AlignMask) & ~AlignMask;
185 const llvm::Type *Ty = CPE.getType();
186 Size += mpTD->getTypeAllocSize(Ty);
187 }
188
189 return Size;
190}
191
// This function converts a Constant* into a GenericValue. The interesting
// part is if C is a ConstantExpr.
//
// @C       The constant to evaluate.
// @Result  Out-parameter receiving the value.  Which GenericValue member is
//          filled depends on C's type: IntVal for integers (and for the raw
//          bit patterns of the long-double types, via bitcastToAPInt),
//          FloatVal/DoubleVal for float/double, PointerVal for pointers.
//
// ConstantExprs are folded recursively; an expression kind not handled
// below aborts via report_fatal_error().
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;  // undef: leave Result untouched, any bits are acceptable
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    // The ValueID check above guarantees this downcast is safe.
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the index
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
                                                 &Indices[0],
                                                 Indices.size());

        // Evaluate the base pointer, then displace it by the byte offset.
        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          // x86 long double: convert via APFloat and keep the 80-bit
          // pattern in IntVal.
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,  // signed conversion
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          // Result.IntVal currently holds the fp80 bit pattern (see the
          // long-double cases above); reconstruct the APFloat from it.
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V; // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal = llvm::APInt(PtrWidth,
                                    uintptr_t(Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        // A bitcast reinterprets the operand's bits; dispatch on the
        // *source* type to know which GenericValue member holds them.
        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            assert(DestTy->isFloatingPointTy() && "invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            assert(DestTy->isPointerTy() && "Invalid bitcast");
            break;  // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        // Binary expressions: evaluate both operands, then dispatch on the
        // operand type to pick the right arithmetic.
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            // Long-double family: operands carry their bit patterns in
            // IntVal; do the arithmetic in APFloat and store the bits back.
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        }  // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    }  // End switch (CE->getOpcode())

    // Reaching here means the ConstantExpr opcode has no folding rule.
    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  }  // C->getValueID() == llvm::Value::ConstantExprVal

  // Plain (non-expression) constants: dispatch on the constant's type.
  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      // Long-double family: store the raw bit pattern in IntVal.
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          // No break: in NDEBUG builds this falls through to the
          // "unknown constant pointer" unreachable below.
          assert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}
610
611
// Stores the data in @Val of type @Ty at address @Addr.
//
// The value is first written in host byte order; if the host and target
// endianness differ, the stored bytes are reversed as a final step.
// @Addr must have room for at least getTypeStoreSize(Ty) bytes.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        //  Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // Copy the (possibly partial) most-significant word last; for a
        // partial word take its low-order i bytes.
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // 80-bit x86 long double: exactly 10 bytes of significant data.
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      // NOTE(review): other type IDs are silently ignored here —
      // presumably unreachable for first-class types; confirm.
      break;
    }
  }

  // Match the target's byte order when it differs from the host's.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}
682
683
684// Recursive function to apply a @Constant value into the specified memory
685// location @Addr.
686void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
687 switch (C->getValueID()) {
688 case llvm::Value::UndefValueVal: {
689 // Nothing to do
690 break;
691 }
692 case llvm::Value::ConstantVectorVal: {
693 // dynamic cast may hurt performance
694 const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;
695
696 unsigned int ElementSize = mpTD->getTypeAllocSize
697 (CP->getType()->getElementType());
698
699 for (int i = 0, e = CP->getNumOperands(); i != e;i++)
700 InitializeConstantToMemory(
701 CP->getOperand(i),
702 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
703 break;
704 }
705 case llvm::Value::ConstantAggregateZeroVal: {
706 memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
707 break;
708 }
709 case llvm::Value::ConstantArrayVal: {
710 const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
711 unsigned int ElementSize = mpTD->getTypeAllocSize
712 (CPA->getType()->getElementType());
713
714 for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
715 InitializeConstantToMemory(
716 CPA->getOperand(i),
717 reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
718 break;
719 }
720 case llvm::Value::ConstantStructVal: {
721 const llvm::ConstantStruct *CPS =
722 static_cast<const llvm::ConstantStruct*>(C);
723 const llvm::StructLayout *SL = mpTD->getStructLayout
724 (llvm::cast<llvm::StructType>(CPS->getType()));
725
726 for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
727 InitializeConstantToMemory(
728 CPS->getOperand(i),
729 reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
730 break;
731 }
732 default: {
733 if (C->getType()->isFirstClassType()) {
734 llvm::GenericValue Val;
735 GetConstantValue(C, Val);
736 StoreValueToMemory(Val, Addr, C->getType());
737 } else {
738 llvm_unreachable("Unknown constant type to initialize memory "
739 "with!");
740 }
741 break;
742 }
743 }
744 return;
745}
746
747
// Lay out and initialize the constant pool @MCP in the emitter's memory.
// Records the pool base and the address of every entry so later
// constant-pool relocations can be resolved.
void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return;  // out of memory

  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    // Round this entry's offset up to its alignment (a power of two).
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    // Target-specific (machine) entries have no generic Constant* we could
    // interpret, so they cannot be materialized here.
    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error
          ("Initialize memory with machine specific constant pool"
           " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    const llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}
792
793
794void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
795 if (mpTJI->hasCustomJumpTables())
796 return;
797
798 const std::vector<llvm::MachineJumpTableEntry> &JT =
799 MJTI->getJumpTables();
800 if (JT.empty())
801 return;
802
803 unsigned NumEntries = 0;
804 for (int i = 0, e = JT.size(); i != e; i++)
805 NumEntries += JT[i].MBBs.size();
806
807 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
808
809 mpJumpTable = MJTI;
810 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
811 MJTI->getEntryAlignment(*mpTD));
812
813 return;
814}
815
816
// Fill in the jump-table memory reserved by initJumpTableInfo() with the
// now-known addresses of the emitted machine basic blocks.
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  // Nothing to do when there are no tables or allocation failed earlier.
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  // We write absolute, pointer-sized addresses into each slot, which is
  // only valid when entries are pointer-sized and relocation is static.
  // (sizeof(mpTD) is just "size of a pointer" — mpTD is a pointer member.)
  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}
841
842
// Resolve a global value @V to a runtime address.
//
// @Reference       The location referencing V (currently unused here but
//                  part of the emitter interface).
// @MayNeedFarStub  When true, conservatively route calls through a stub in
//                  case the callee ends up out of direct-call range.
//
// Functions resolve to already-emitted code, an existing or new lazy stub,
// or (when safe) a directly resolved external address.  Global variables
// are emitted on demand; aliases are resolved to their aliasee first.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        //   we're returning the same address for the function as any
        //   previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        // x86_64 architecture may encounter the bug:
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          // code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          // Emit the aliased variable on first use, then return its address.
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          // resolveAliasedGlobal() should never hand back another alias.
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  // Any path that falls out of the switch saw a global kind we cannot
  // resolve.
  llvm_unreachable("Unknown type of global value!");
}
932
933
934// If the specified function has been code-gen'd, return a pointer to the
935// function. If not, compile it, or use a stub to implement lazy compilation
936// if available.
937void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
938 // If we have already code generated the function, just return the
939 // address.
940 if (void *Addr = GetPointerToGlobalIfAvailable(F))
941 return Addr;
942
943 // Get a stub if the target supports it.
944 return GetLazyFunctionStub(F);
945}
946
947
// Return (creating and caching on first use) the lazy stub for @F.
//
// The stub's address — not the callee's real address — is what gets
// recorded in the global address map, so every caller observes one stable
// address for the function.  External functions are resolved eagerly here;
// functions that could not be resolved yet are queued in PendingFunctions.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.  startGVStub/finishGVStub bracket the
  // target's stub emission with the required size and alignment.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    // Not resolvable yet: remember it so the stub can be patched later.
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}
982
983
984void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
985 bool AbortOnFailure) {
986 void *Addr = GetPointerToGlobalIfAvailable(F);
987 if (Addr)
988 return Addr;
989
990 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
991 "Internal error: only external defined function routes here!");
992
993 // Handle the failure resolution by ourselves.
994 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
995 /* AbortOnFailure = */ false);
996
997 // If we resolved the symbol to a null address (eg. a weak external)
998 // return a null pointer let the application handle it.
999 if (Addr == NULL) {
1000 if (AbortOnFailure)
1001 llvm::report_fatal_error("Could not resolve external function "
1002 "address: " + F->getName());
1003 else
1004 return NULL;
1005 }
1006
1007 AddGlobalMapping(F, Addr);
1008
1009 return Addr;
1010}
1011
1012
1013void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
1014 bool AbortOnFailure) {
1015 if (void *Addr = FindRuntimeFunction(Name.c_str()))
1016 return Addr;
1017
1018 if (mpSymbolLookupFn)
1019 if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1020 return Addr;
1021
1022 if (AbortOnFailure)
1023 llvm::report_fatal_error("Program used external symbol '" + Name +
1024 "' which could not be resolved!");
1025
1026 return NULL;
1027}
1028
1029
1030// Return the address of the specified global variable, possibly emitting it
1031// to memory if needed. This is used by the Emitter.
1032void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1033 void *Ptr = GetPointerToGlobalIfAvailable(GV);
1034 if (Ptr)
1035 return Ptr;
1036
1037 if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1038 // If the global is external, just remember the address.
1039 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1040 AddGlobalMapping(GV, Ptr);
1041 } else {
1042 // If the global hasn't been emitted to memory yet, allocate space and
1043 // emit it into memory.
1044 Ptr = GetMemoryForGV(GV);
1045 AddGlobalMapping(GV, Ptr);
1046 EmitGlobalVariable(GV);
1047 }
1048
1049 return Ptr;
1050}
1051
1052
// This method abstracts memory allocation of global variable so that the
// JIT can allocate thread local variables depending on the target.
//
// Returns storage large enough for @GV at its preferred alignment.  TLS
// globals are rejected with a fatal error.
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We can support TLS by
    //
    //  Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // But I tend not to.
    // (should we disable this in the front-end (i.e., slang)?).
    llvm::report_fatal_error
        ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      // malloc() already provides at least 8-byte alignment.
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      // NOTE(review): the adjusted pointer can no longer be handed to
      // free(), so this allocation is effectively process-lifetime —
      // confirm that is intended.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
            (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    // Let the code memory manager carve the space out of its region.
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}
1089
1090
1091void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1092 void *GA = GetPointerToGlobalIfAvailable(GV);
1093
1094 if (GV->isThreadLocal())
1095 llvm::report_fatal_error
1096 ("We don't support Thread Local Storage (TLS)!");
1097
1098 if (GA == NULL) {
1099 // If it's not already specified, allocate memory for the global.
1100 GA = GetMemoryForGV(GV);
1101 AddGlobalMapping(GV, GA);
1102 }
1103
1104 InitializeConstantToMemory(GV->getInitializer(), GA);
1105
1106 // You can do some statistics on global variable here.
1107 return;
1108}
1109
1110
1111void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1112 // Make sure GV is emitted first, and create a stub containing the fully
1113 // resolved address.
1114 void *GVAddress = GetPointerToGlobal(V, Reference, false);
1115
1116 // If we already have a stub for this global variable, recycle it.
1117 void *&IndirectSym = GlobalToIndirectSymMap[V];
1118 // Otherwise, codegen a new indirect symbol.
1119 if (!IndirectSym)
1120 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1121
1122 return IndirectSym;
1123}
1124
1125
1126// Return a stub for the function at the specified address.
1127void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
1128 void *&Stub = ExternalFnToStubMap[FnAddr];
1129 if (Stub)
1130 return Stub;
1131
1132 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1133 startGVStub(0, SL.Size, SL.Alignment);
1134 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1135 finishGVStub();
1136
1137 return Stub;
1138}
1139
1140
1141#if defined(USE_DISASSEMBLER)
1142void CodeEmitter::Disassemble(const llvm::StringRef &Name,
1143 uint8_t *Start, size_t Length, bool IsStub) {
1144 llvm::raw_ostream *OS;
1145
1146#if defined(USE_DISASSEMBLER_FILE)
1147 std::string ErrorInfo;
1148 OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
1149 ErrorInfo,
1150 llvm::raw_fd_ostream::F_Append);
1151 if (!ErrorInfo.empty()) { // some errors occurred
1152 // LOGE("Error in creating disassembly file");
1153 delete OS;
1154 return;
1155 }
1156#else
1157 OS = &llvm::outs();
1158#endif
1159
1160 *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
1161 << "\n";
1162
1163 if (mpAsmInfo == NULL)
Loganf2b79d02010-11-27 01:07:53 +08001164 mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
Logan28325bf2010-11-26 23:27:41 +08001165 if (mpDisassmbler == NULL)
1166 mpDisassmbler = mpTarget->createMCDisassembler();
1167 if (mpIP == NULL)
1168 mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
1169 *mpAsmInfo);
1170
1171 const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
1172 Length);
1173 uint64_t Size;
1174 uint64_t Index;
1175
1176 for (Index = 0; Index < Length; Index += Size) {
1177 llvm::MCInst Inst;
1178
1179 if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
1180 /* REMOVED */ llvm::nulls())) {
1181 (*OS).indent(4)
1182 .write("0x", 2)
1183 .write_hex((uint32_t) Start + Index)
1184 .write(':');
1185 mpIP->printInst(&Inst, *OS);
1186 *OS << "\n";
1187 } else {
1188 if (Size == 0)
1189 Size = 1; // skip illegible bytes
1190 }
1191 }
1192
1193 *OS << "\n";
1194 delete BufferMObj;
1195
1196#if defined(USE_DISASSEMBLER_FILE)
1197 // If you want the disassemble results write to file, uncomment this.
1198 OS->close();
1199 delete OS;
1200#endif
1201
1202 return;
1203}
1204#endif // defined(USE_DISASSEMBLER)
1205
1206
1207void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
1208 // Set Target
1209 mpTarget = &TM.getTarget();
1210 // Set TargetJITInfo
1211 mpTJI = TM.getJITInfo();
1212 // set TargetData
1213 mpTD = TM.getTargetData();
1214
1215 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
1216
1217 return;
1218}
1219
1220
// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields and emits the
// constant pool and jump table info ahead of the function body.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // The code region must be writable while we emit into it; it is flipped
  // back to executable in finishFunction().
  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guaranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Reuse the record left over from a failed (retried) emission, if any.
  if (mpCurEmitFunction == NULL)
    mpCurEmitFunction = new EmittedFunctionCode();
  mpCurEmitFunction->FunctionBody = BufferBegin;

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  // NOTE(review): the code actually requests 16-byte alignment here,
  // stronger than the comment's "4-byte" — confirm which is intended.
  emitAlignment(16);

  // Constant pool and jump tables are laid out before the function text.
  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  // Record where the actual machine code starts (after pool/tables).
  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->Code = CurBufferPtr;

  // Basic-block addresses are per-function; reset them.
  mMBBLocations.clear();
}
1265
1266
// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
// NOTE(review): the overflow paths below actually `return false`, which
// contradicts this header comment — confirm the intended retry contract
// with the caller before relying on either.
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // No enough memory
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  // Jump table contents can only be filled in once every basic block has
  // been emitted and thus has a known address.
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
    reinterpret_cast<uint8_t*>(
      GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    // Offset of this function's buffer within the whole code region,
    // used to record cache-file relocation entries below.
    ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          // Resolve by name (fatal if unresolved), via a far stub when
          // the target says the call may be out of range.
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
            GetPointerToGVIndirectSym(
                MR.getGlobalValue(),
                BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
            (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
            (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
            (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.

          // Cache the relocation result address
          mCachingRelocations.push_back(
            oBCCRelocEntry(MR.getRelocationType(),
                           MR.getMachineCodeOffset() + BufferOffset,
                           ResultPtr));
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Let the target patch the machine code with the resolved addresses.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  // Register the finished function under its name so getFunctionBinary()
  // and getFunctionNames() can find it later.
  if (F.getFunction()->hasName())
    mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
  mpCurEmitFunction = NULL;

  // Per-function bookkeeping is done; reset for the next function.
  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // If a lazy stub was emitted for this function earlier, rewrite it to
  // jump to the now-known final address.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}
1376
1377
1378void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
1379 unsigned Alignment) {
1380 mpSavedBufferBegin = BufferBegin;
1381 mpSavedBufferEnd = BufferEnd;
1382 mpSavedCurBufferPtr = CurBufferPtr;
1383
1384 BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
1385 Alignment);
1386 BufferEnd = BufferBegin + StubSize + 1;
1387
1388 return;
1389}
1390
1391
1392void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1393 mpSavedBufferBegin = BufferBegin;
1394 mpSavedBufferEnd = BufferEnd;
1395 mpSavedCurBufferPtr = CurBufferPtr;
1396
1397 BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
1398 BufferEnd = BufferBegin + StubSize + 1;
1399
1400 return;
1401}
1402
1403
1404void CodeEmitter::finishGVStub() {
1405 assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1406
1407 // restore
1408 BufferBegin = mpSavedBufferBegin;
1409 BufferEnd = mpSavedBufferEnd;
1410 CurBufferPtr = mpSavedCurBufferPtr;
1411}
1412
1413
1414// Allocates and fills storage for an indirect GlobalValue, and returns the
1415// address.
1416void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
1417 const uint8_t *Buffer, size_t Size,
1418 unsigned Alignment) {
1419 uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
1420 memcpy(IndGV, Buffer, Size);
1421 return IndGV;
1422}
1423
1424
1425// Allocate memory for a global. Unlike allocateSpace, this method does not
1426// allocate memory in the current output buffer, because a global may live
1427// longer than the current function.
1428void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1429 // Delegate this call through the memory manager.
1430 return mpMemMgr->allocateGlobal(Size, Alignment);
1431}
1432
1433
1434// This should be called by the target when a new basic block is about to be
1435// emitted. This way the MCE knows where the start of the block is, and can
1436// implement getMachineBasicBlockAddress.
1437void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
1438 if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
1439 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
1440 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
1441 return;
1442}
1443
1444
1445// Return the address of the jump table with index @Index in the function
1446// that last called initJumpTableInfo.
1447uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
1448 const std::vector<llvm::MachineJumpTableEntry> &JT =
1449 mpJumpTable->getJumpTables();
1450
1451 assert((Index < JT.size()) && "Invalid jump table index!");
1452
1453 unsigned int Offset = 0;
1454 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
1455
1456 for (unsigned i = 0; i < Index; i++)
1457 Offset += JT[i].MBBs.size();
1458 Offset *= EntrySize;
1459
1460 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
1461}
1462
1463
1464// Return the address of the specified MachineBasicBlock, only usable after
1465// the label for the MBB has been emitted.
1466uintptr_t CodeEmitter::getMachineBasicBlockAddress(
1467 llvm::MachineBasicBlock *MBB) const {
1468 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
1469 mMBBLocations[MBB->getNumber()] &&
1470 "MBB not emitted!");
1471 return mMBBLocations[MBB->getNumber()];
1472}
1473
1474
1475void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
1476 // Get the empty stub we generated earlier.
1477 void *Stub;
1478 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
1479 if (I != PendingFunctions.end())
1480 Stub = mFunctionToLazyStubMap[F];
1481 else
1482 return;
1483
1484 void *Addr = GetPointerToGlobalIfAvailable(F);
1485
1486 assert(Addr != Stub &&
1487 "Function must have non-stub address to be updated.");
1488
1489 // Tell the target jit info to rewrite the stub at the specified address,
1490 // rather than creating a new one.
1491 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1492 startGVStub(Stub, SL.Size);
1493 mpTJI->emitFunctionStub(F, Addr, *this);
1494 finishGVStub();
1495
1496 Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
1497 SL.Size, true);
1498
1499 PendingFunctions.erase(I);
1500}
1501
1502
1503void CodeEmitter::getFunctionNames(BCCsizei *actualFunctionCount,
1504 BCCsizei maxFunctionCount,
1505 BCCchar **functions) {
1506 int functionCount = mEmittedFunctions.size();
1507
1508 if (actualFunctionCount)
1509 *actualFunctionCount = functionCount;
1510 if (functionCount > maxFunctionCount)
1511 functionCount = maxFunctionCount;
1512 if (functions)
1513 for (EmittedFunctionsMapTy::const_iterator
1514 I = mEmittedFunctions.begin(), E = mEmittedFunctions.end();
1515 I != E && (functionCount > 0); I++, functionCount--) {
1516 *functions++ = const_cast<BCCchar*>(I->first.c_str());
1517 }
1518}
1519
1520
1521void CodeEmitter::getFunctionBinary(BCCchar *label,
1522 BCCvoid **base,
1523 BCCsizei *length) {
1524 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
1525 if (I == mEmittedFunctions.end()) {
1526 *base = NULL;
1527 *length = 0;
1528 } else {
1529 *base = I->second->Code;
1530 *length = I->second->Size;
1531 }
1532}
1533
1534} // namespace bcc