blob: 38fc281a81ff62e06afa7a5b857349a0cd92d28b [file] [log] [blame]
/*
 * Copyright 2010, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
Loganc4395232010-11-27 18:54:17 +080017#include "CodeEmitter.h"
Logan28325bf2010-11-26 23:27:41 +080018
Loganc4395232010-11-27 18:54:17 +080019#include "CodeMemoryManager.h"
20#include "EmittedFuncEntry.h"
21#include "Runtime.h"
Logan28325bf2010-11-26 23:27:41 +080022
23#include <bcc/bcc.h>
24#include <bcc/bcc_cache.h>
25
26#include "llvm/ADT/APFloat.h"
27#include "llvm/ADT/APInt.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/StringRef.h"
31
32#include "llvm/CodeGen/MachineBasicBlock.h"
33#include "llvm/CodeGen/MachineConstantPool.h"
34#include "llvm/CodeGen/MachineFunction.h"
35#include "llvm/CodeGen/MachineModuleInfo.h"
36#include "llvm/CodeGen/MachineRelocation.h"
37#include "llvm/CodeGen/MachineJumpTableInfo.h"
38#include "llvm/CodeGen/JITCodeEmitter.h"
39
40#include "llvm/ExecutionEngine/GenericValue.h"
41
42#include "llvm/MC/MCAsmInfo.h"
43#include "llvm/MC/MCDisassembler.h"
44#include "llvm/MC/MCInst.h"
45#include "llvm/MC/MCInstPrinter.h"
46
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/raw_ostream.h"
49
Logan2037d722010-11-27 14:31:56 +080050#if defined(USE_DISASSEMBLER)
51#include "llvm/Support/MemoryObject.h"
52#endif
53
Logan28325bf2010-11-26 23:27:41 +080054#include "llvm/System/Host.h"
55
56#include "llvm/Target/TargetData.h"
57#include "llvm/Target/TargetMachine.h"
58#include "llvm/Target/TargetRegistry.h"
59#include "llvm/Target/TargetJITInfo.h"
60
61#include "llvm/Constant.h"
62#include "llvm/Constants.h"
63#include "llvm/DerivedTypes.h"
64#include "llvm/Function.h"
65#include "llvm/GlobalAlias.h"
66#include "llvm/GlobalValue.h"
67#include "llvm/GlobalVariable.h"
68#include "llvm/Instruction.h"
69#include "llvm/Type.h"
70
71#include <algorithm>
72#include <vector>
73#include <set>
74#include <string>
75
76#include <stddef.h>
77
78
namespace {

#if defined(USE_DISASSEMBLER)
// Adapts a raw (pointer, length) byte buffer to the llvm::MemoryObject
// interface so the MC disassembler can fetch instruction bytes from memory
// we already emitted. The buffer is NOT owned by this object.
class BufferMemoryObject : public llvm::MemoryObject {
private:
  const uint8_t *mBytes;  // borrowed; must outlive this object
  uint64_t mLength;       // number of readable bytes starting at mBytes

public:
  BufferMemoryObject(const uint8_t *Bytes, uint64_t Length)
      : mBytes(Bytes), mLength(Length) {
  }

  virtual uint64_t getBase() const { return 0; }
  virtual uint64_t getExtent() const { return mLength; }

  // Stores the byte at @Addr into *@Byte and returns 0, or returns -1 when
  // @Addr is out of range. Valid addresses are [0, mLength).
  //
  // Fix vs. original: the bounds check used `Addr > getExtent()`, which
  // accepted Addr == mLength and read one byte past the end of the buffer.
  virtual int readByte(uint64_t Addr, uint8_t *Byte) const {
    if (Addr >= getExtent())
      return -1;
    *Byte = mBytes[Addr];
    return 0;
  }
};
#endif

}  // namespace anonymous
105
106
Logan28325bf2010-11-26 23:27:41 +0800107namespace bcc {
108
// Will take the ownership of @MemMgr
//
// Every other cached pointer (target, TargetJITInfo, TargetData, current
// function record, constant pool, jump table, MachineModuleInfo, and the
// optional disassembler helpers) starts out NULL; they are installed later
// by the driver before code emission begins.
CodeEmitter::CodeEmitter(CodeMemoryManager *pMemMgr)
    : mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
#if defined(USE_DISASSEMBLER)
      mpAsmInfo(NULL),
      mpDisassmbler(NULL),
      mpIP(NULL),
#endif
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}
127
128
CodeEmitter::~CodeEmitter() {
  // The emitter owns the memory manager handed to the constructor.
  delete mpMemMgr;
#if defined(USE_DISASSEMBLER)
  // Disassembler helpers may never have been created; delete on NULL is a
  // harmless no-op.
  delete mpAsmInfo;
  delete mpDisassmbler;
  delete mpIP;
#endif
}
137
138
// Once you finish the compilation on a translation unit, you can call this
// function to recycle the memory (which is used at compilation time and not
// needed for runtime).
//
// NOTE: You should not call this function until the code-gen passes for a
//       given module are done. Otherwise, the results are undefined and may
//       cause the system crash!
void CodeEmitter::releaseUnnecessary() {
  // Drop all compile-time-only bookkeeping: basic-block/label addresses,
  // global-address mappings, stub maps, and the pending-function set.
  mMBBLocations.clear();
  mLabelLocations.clear();
  mGlobalAddressMap.clear();
  mFunctionToLazyStubMap.clear();
  GlobalToIndirectSymMap.clear();
  ExternalFnToStubMap.clear();
  PendingFunctions.clear();
}
155
156
157void CodeEmitter::reset() {
158 releaseUnnecessary();
159
160 mpSymbolLookupFn = NULL;
161 mpSymbolLookupContext = NULL;
162
163 mpTJI = NULL;
164 mpTD = NULL;
165
166 for (EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin(),
167 E = mEmittedFunctions.end();
168 I != E;
169 I++)
170 if (I->second != NULL)
171 delete I->second;
172 mEmittedFunctions.clear();
173
174 mpMemMgr->reset();
175}
176
177
178void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV, void *Addr) {
179 if (Addr == NULL) {
180 // Removing mapping
181 GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
182 void *OldVal;
183
184 if (I == mGlobalAddressMap.end()) {
185 OldVal = NULL;
186 } else {
187 OldVal = I->second;
188 mGlobalAddressMap.erase(I);
189 }
190
191 return OldVal;
192 }
193
194 void *&CurVal = mGlobalAddressMap[GV];
195 void *OldVal = CurVal;
196
197 CurVal = Addr;
198
199 return OldVal;
200}
201
202
203unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
204 llvm::MachineConstantPool *MCP) {
205 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
206 MCP->getConstants();
207
208 if (Constants.empty())
209 return 0;
210
211 unsigned int Size = 0;
212 for (int i = 0, e = Constants.size(); i != e; i++) {
213 llvm::MachineConstantPoolEntry CPE = Constants[i];
214 unsigned int AlignMask = CPE.getAlignment() - 1;
215 Size = (Size + AlignMask) & ~AlignMask;
216 const llvm::Type *Ty = CPE.getType();
217 Size += mpTD->getTypeAllocSize(Ty);
218 }
219
220 return Size;
221}
222
// This function converts a Constant* into a GenericValue. The interesting
// part is if C is a ConstantExpr.
//
// The evaluated value is written into @Result; which union member is
// meaningful (IntVal, FloatVal, DoubleVal or PointerVal) depends on C's
// type. Long-double-family values (x86_fp80/fp128/ppc_fp128) travel as the
// raw bit pattern in Result.IntVal.
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;  // undef: any bit pattern is acceptable, leave Result as-is
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    // Constant-fold the expression by recursively evaluating its operands
    // and then applying the operator.
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the index
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
                                                 &Indices[0],
                                                 Indices.size());

        // Evaluate the base pointer, then add the byte offset.
        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          // Build an x87 80-bit APFloat from the (unsigned) integer and
          // hand back its raw bit pattern through IntVal.
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          // Same as UIToFP above, but treating the source as signed.
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          // Reconstitute the x87 value from its bit pattern, then convert
          // to an at-most-64-bit integer with truncation toward zero.
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V;  // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal = llvm::APInt(PtrWidth, uintptr_t
                                    (Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        // Normalize the integer to the target pointer width first.
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        // Bitcast is a reinterpretation of the operand's bits, dispatched
        // on the *source* type.
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            assert(DestTy->isFloatingPointTy() && "invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            assert(DestTy->isPointerTy() && "Invalid bitcast");
            break;  // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        // Binary operators: evaluate both sides, then dispatch on the
        // operand type (integer, float, double, or long-double family).
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            // Long-double family: operands arrive as raw bit patterns in
            // IntVal; compute with APFloat and hand back the bit pattern.
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        }  // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    }  // End switch (CE->getOpcode())

    // Any ConstantExpr opcode not handled above is a hard error.
    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  }  // C->getValueID() == llvm::Value::ConstantExprVal

  // Plain (non-expression) constants: dispatch on the constant's type.
  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      // Long-double family travels as a raw bit pattern in IntVal.
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          // Function addresses may resolve to a lazy stub rather than the
          // emitted code itself.
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          assert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}
641
642
// Stores the data in @Val of type @Ty at address @Addr.
//
// Handles host/target byte-order differences: integers wider than 64 bits
// are stored word-by-word on big-endian hosts, and the whole stored range
// is byte-reversed at the end when host and target endianness differ.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        //  Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          // Full words go to the tail of the destination, highest first.
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        // The remaining partial word (i bytes) fills the front.
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      // x87 long double: 10 significant bytes, carried as raw bits in
      // IntVal.
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      // NOTE(review): unsupported types are silently ignored here (no
      // store, no error) — presumably unreachable for first-class types;
      // confirm against callers.
      break;
    }
  }

  // Reconcile host vs. target byte order over the bytes just written.
  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}
713
714
// Recursive function to apply a @Constant value into the specified memory
// location @Addr.
//
// Aggregates (vectors, arrays, structs) recurse element-by-element using
// the target's allocation sizes / struct layout for offsets; first-class
// scalars funnel through GetConstantValue + StoreValueToMemory.
void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C, void *Addr) {
  switch (C->getValueID()) {
    case llvm::Value::UndefValueVal: {
      // Nothing to do
      break;
    }
    case llvm::Value::ConstantVectorVal: {
      // dynamic cast may hurt performance
      const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;

      // Elements are laid out back-to-back at the element's alloc size.
      unsigned int ElementSize = mpTD->getTypeAllocSize
        (CP->getType()->getElementType());

      for (int i = 0, e = CP->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CP->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantAggregateZeroVal: {
      // Zero-initializer: one memset covers the whole aggregate.
      memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
      break;
    }
    case llvm::Value::ConstantArrayVal: {
      const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
      unsigned int ElementSize = mpTD->getTypeAllocSize
        (CPA->getType()->getElementType());

      for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPA->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantStructVal: {
      const llvm::ConstantStruct *CPS =
          static_cast<const llvm::ConstantStruct*>(C);
      // Struct fields use the target's computed layout (honors padding).
      const llvm::StructLayout *SL = mpTD->getStructLayout
        (llvm::cast<llvm::StructType>(CPS->getType()));

      for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPS->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
      break;
    }
    default: {
      if (C->getType()->isFirstClassType()) {
        // Scalar leaf: evaluate, then store with target byte order.
        llvm::GenericValue Val;
        GetConstantValue(C, Val);
        StoreValueToMemory(Val, Addr, C->getType());
      } else {
        llvm_unreachable("Unknown constant type to initialize memory "
                         "with!");
      }
      break;
    }
  }
  return;
}
777
778
779void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
780 if (mpTJI->hasCustomConstantPool())
781 return;
782
783 // Constant pool address resolution is handled by the target itself in ARM
784 // (TargetJITInfo::hasCustomConstantPool() returns true).
785#if !defined(PROVIDE_ARM_CODEGEN)
786 const std::vector<llvm::MachineConstantPoolEntry> &Constants =
787 MCP->getConstants();
788
789 if (Constants.empty())
790 return;
791
792 unsigned Size = GetConstantPoolSizeInBytes(MCP);
793 unsigned Align = MCP->getConstantPoolAlignment();
794
795 mpConstantPoolBase = allocateSpace(Size, Align);
796 mpConstantPool = MCP;
797
798 if (mpConstantPoolBase == NULL)
799 return; // out of memory
800
801 unsigned Offset = 0;
802 for (int i = 0, e = Constants.size(); i != e; i++) {
803 llvm::MachineConstantPoolEntry CPE = Constants[i];
804 unsigned AlignMask = CPE.getAlignment() - 1;
805 Offset = (Offset + AlignMask) & ~AlignMask;
806
807 uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
808 mConstPoolAddresses.push_back(CAddr);
809
810 if (CPE.isMachineConstantPoolEntry())
811 llvm::report_fatal_error
812 ("Initialize memory with machine specific constant pool"
813 " entry has not been implemented!");
814
815 InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);
816
817 const llvm::Type *Ty = CPE.Val.ConstVal->getType();
818 Offset += mpTD->getTypeAllocSize(Ty);
819 }
820#endif
821 return;
822}
823
824
825void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
826 if (mpTJI->hasCustomJumpTables())
827 return;
828
829 const std::vector<llvm::MachineJumpTableEntry> &JT =
830 MJTI->getJumpTables();
831 if (JT.empty())
832 return;
833
834 unsigned NumEntries = 0;
835 for (int i = 0, e = JT.size(); i != e; i++)
836 NumEntries += JT[i].MBBs.size();
837
838 unsigned EntrySize = MJTI->getEntrySize(*mpTD);
839
840 mpJumpTable = MJTI;
841 mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
842 MJTI->getEntryAlignment(*mpTD));
843
844 return;
845}
846
847
// Fills the jump-table memory reserved by initJumpTableInfo() with the
// final addresses of the emitted MachineBasicBlocks. Must run after all
// basic blocks have been emitted.
void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  // sizeof(mpTD) is deliberately the size of a host pointer: each table
  // entry must be exactly pointer-sized for the raw intptr_t writes below.
  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}
872
873
// Resolves a GlobalValue (function, global variable, or alias) to a usable
// runtime address, emitting code/data or a lazy stub as needed. @Reference
// is the location the address will be patched into; @MayNeedFarStub forces
// stub usage when the target cannot guarantee arbitrary-distance calls.
void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created.
        // We do this first so that:
        //   we're returning the same address for the function as any
        //   previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        //
        // x86_64 architecture may encounter the bug:
        //   http://llvm.org/bugs/show_bug.cgi?id=5201
        // which generate instruction "call" instead of "callq".
        //
        // And once the real address of stub is greater than 64-bit
        // long, the replacement will truncate to 32-bit resulting a
        // serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
        }
#endif
      }

      // Otherwise, we may need a to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      // Resolve the alias chain to its ultimate aliasee first.
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): is there's any possibility that the function is not
          //            code-gen'd?
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          // Changing to false because wanting to allow later calls to
          // mpTJI->relocate() without aborting. For caching purpose
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          if (void *P = mGlobalAddressMap[GV])
            return P;

          // Not yet emitted: emit it now and re-read the mapping.
          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          // resolveAliasedGlobal() should never hand back another alias.
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  // Any path that falls out of the switch is an unsupported global kind.
  llvm_unreachable("Unknown type of global value!");
}
963
964
965// If the specified function has been code-gen'd, return a pointer to the
966// function. If not, compile it, or use a stub to implement lazy compilation
967// if available.
968void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
969 // If we have already code generated the function, just return the
970 // address.
971 if (void *Addr = GetPointerToGlobalIfAvailable(F))
972 return Addr;
973
974 // Get a stub if the target supports it.
975 return GetLazyFunctionStub(F);
976}
977
978
// Returns (creating on first use) the lazy stub for @F. External functions
// are resolved eagerly so the stub can call the real address; unresolved
// functions are queued in PendingFunctions for later patching.
void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any cases, we should NOT resolve function at runtime (though we are
  // able to). We resolve this right now.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
    // Changing to false because wanting to allow later calls to
    // mpTJI->relocate() without aborting. For caching purpose
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}
1013
1014
1015void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
1016 bool AbortOnFailure) {
1017 void *Addr = GetPointerToGlobalIfAvailable(F);
1018 if (Addr)
1019 return Addr;
1020
1021 assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
1022 "Internal error: only external defined function routes here!");
1023
1024 // Handle the failure resolution by ourselves.
1025 Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
1026 /* AbortOnFailure = */ false);
1027
1028 // If we resolved the symbol to a null address (eg. a weak external)
1029 // return a null pointer let the application handle it.
1030 if (Addr == NULL) {
1031 if (AbortOnFailure)
1032 llvm::report_fatal_error("Could not resolve external function "
1033 "address: " + F->getName());
1034 else
1035 return NULL;
1036 }
1037
1038 AddGlobalMapping(F, Addr);
1039
1040 return Addr;
1041}
1042
1043
1044void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
1045 bool AbortOnFailure) {
1046 if (void *Addr = FindRuntimeFunction(Name.c_str()))
1047 return Addr;
1048
1049 if (mpSymbolLookupFn)
1050 if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
1051 return Addr;
1052
1053 if (AbortOnFailure)
1054 llvm::report_fatal_error("Program used external symbol '" + Name +
1055 "' which could not be resolved!");
1056
1057 return NULL;
1058}
1059
1060
1061// Return the address of the specified global variable, possibly emitting it
1062// to memory if needed. This is used by the Emitter.
1063void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
1064 void *Ptr = GetPointerToGlobalIfAvailable(GV);
1065 if (Ptr)
1066 return Ptr;
1067
1068 if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
1069 // If the global is external, just remember the address.
1070 Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
1071 AddGlobalMapping(GV, Ptr);
1072 } else {
1073 // If the global hasn't been emitted to memory yet, allocate space and
1074 // emit it into memory.
1075 Ptr = GetMemoryForGV(GV);
1076 AddGlobalMapping(GV, Ptr);
1077 EmitGlobalVariable(GV);
1078 }
1079
1080 return Ptr;
1081}
1082
1083
1084// This method abstracts memory allocation of global variable so that the
1085// JIT can allocate thread local variables depending on the target.
1086void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
1087 void *Ptr;
1088
1089 const llvm::Type *GlobalType = GV->getType()->getElementType();
1090 size_t S = mpTD->getTypeAllocSize(GlobalType);
1091 size_t A = mpTD->getPreferredAlignment(GV);
1092
1093 if (GV->isThreadLocal()) {
1094 // We can support TLS by
1095 //
1096 // Ptr = TJI.allocateThreadLocalMemory(S);
1097 //
1098 // But I tend not to.
1099 // (should we disable this in the front-end (i.e., slang)?).
1100 llvm::report_fatal_error
1101 ("Compilation of Thread Local Storage (TLS) is disabled!");
1102
1103 } else if (mpTJI->allocateSeparateGVMemory()) {
1104 if (A <= 8) {
1105 Ptr = malloc(S);
1106 } else {
1107 // Allocate (S + A) bytes of memory, then use an aligned pointer
1108 // within that space.
1109 Ptr = malloc(S + A);
1110 unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
1111 Ptr = reinterpret_cast<uint8_t*>(Ptr) +
1112 (MisAligned ? (A - MisAligned) : 0);
1113 }
1114 } else {
1115 Ptr = allocateGlobal(S, A);
1116 }
1117
1118 return Ptr;
1119}
1120
1121
1122void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
1123 void *GA = GetPointerToGlobalIfAvailable(GV);
1124
1125 if (GV->isThreadLocal())
1126 llvm::report_fatal_error
1127 ("We don't support Thread Local Storage (TLS)!");
1128
1129 if (GA == NULL) {
1130 // If it's not already specified, allocate memory for the global.
1131 GA = GetMemoryForGV(GV);
1132 AddGlobalMapping(GV, GA);
1133 }
1134
1135 InitializeConstantToMemory(GV->getInitializer(), GA);
1136
1137 // You can do some statistics on global variable here.
1138 return;
1139}
1140
1141
1142void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V, void *Reference) {
1143 // Make sure GV is emitted first, and create a stub containing the fully
1144 // resolved address.
1145 void *GVAddress = GetPointerToGlobal(V, Reference, false);
1146
1147 // If we already have a stub for this global variable, recycle it.
1148 void *&IndirectSym = GlobalToIndirectSymMap[V];
1149 // Otherwise, codegen a new indirect symbol.
1150 if (!IndirectSym)
1151 IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);
1152
1153 return IndirectSym;
1154}
1155
1156
1157// Return a stub for the function at the specified address.
1158void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
1159 void *&Stub = ExternalFnToStubMap[FnAddr];
1160 if (Stub)
1161 return Stub;
1162
1163 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1164 startGVStub(0, SL.Size, SL.Alignment);
1165 Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
1166 finishGVStub();
1167
1168 return Stub;
1169}
1170
1171
Logan28325bf2010-11-26 23:27:41 +08001172void CodeEmitter::Disassemble(const llvm::StringRef &Name,
1173 uint8_t *Start, size_t Length, bool IsStub) {
Logane57b3322010-11-27 18:21:00 +08001174
1175#if defined(USE_DISASSEMBLER)
Logan28325bf2010-11-26 23:27:41 +08001176 llvm::raw_ostream *OS;
1177
1178#if defined(USE_DISASSEMBLER_FILE)
1179 std::string ErrorInfo;
1180 OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
1181 ErrorInfo,
1182 llvm::raw_fd_ostream::F_Append);
Logane57b3322010-11-27 18:21:00 +08001183
Logan28325bf2010-11-26 23:27:41 +08001184 if (!ErrorInfo.empty()) { // some errors occurred
1185 // LOGE("Error in creating disassembly file");
1186 delete OS;
1187 return;
1188 }
1189#else
1190 OS = &llvm::outs();
1191#endif
1192
1193 *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
1194 << "\n";
1195
1196 if (mpAsmInfo == NULL)
Loganf2b79d02010-11-27 01:07:53 +08001197 mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
Logan28325bf2010-11-26 23:27:41 +08001198 if (mpDisassmbler == NULL)
1199 mpDisassmbler = mpTarget->createMCDisassembler();
1200 if (mpIP == NULL)
1201 mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
1202 *mpAsmInfo);
1203
1204 const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
1205 Length);
1206 uint64_t Size;
1207 uint64_t Index;
1208
1209 for (Index = 0; Index < Length; Index += Size) {
1210 llvm::MCInst Inst;
1211
1212 if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
1213 /* REMOVED */ llvm::nulls())) {
1214 (*OS).indent(4)
1215 .write("0x", 2)
1216 .write_hex((uint32_t) Start + Index)
1217 .write(':');
1218 mpIP->printInst(&Inst, *OS);
1219 *OS << "\n";
1220 } else {
1221 if (Size == 0)
1222 Size = 1; // skip illegible bytes
1223 }
1224 }
1225
1226 *OS << "\n";
1227 delete BufferMObj;
1228
1229#if defined(USE_DISASSEMBLER_FILE)
1230 // If you want the disassemble results write to file, uncomment this.
1231 OS->close();
1232 delete OS;
1233#endif
1234
Logane57b3322010-11-27 18:21:00 +08001235#endif // defined(USE_DISASSEMBLER)
Logan28325bf2010-11-26 23:27:41 +08001236}
Logan28325bf2010-11-26 23:27:41 +08001237
1238
1239void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
1240 // Set Target
1241 mpTarget = &TM.getTarget();
1242 // Set TargetJITInfo
1243 mpTJI = TM.getJITInfo();
1244 // set TargetData
1245 mpTD = TM.getTargetData();
1246
1247 assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");
1248
1249 return;
1250}
1251
1252
// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // Make the code region writable for the duration of emission;
  // finishFunction() re-protects it via setMemoryExecutable().
  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Reuse the entry left over from a previous (failed) attempt, if any;
  // finishFunction() clears mpCurEmitFunction only on success.
  if (mpCurEmitFunction == NULL)
    mpCurEmitFunction = new EmittedFuncEntry();
  // FunctionBody marks the start of everything emitted for this function,
  // including the constant pool and jump tables; Code (set below) marks
  // the start of the machine code proper.
  mpCurEmitFunction->FunctionBody = BufferBegin;

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  // (16 is used here, which also satisfies the 4-byte promise above.)
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  // Map the function to the start of its machine code (i.e. past the
  // constant-pool/jump-table data emitted above).
  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->Code = CurBufferPtr;

  mMBBLocations.clear();
}
1297
1298
// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
//
// NOTE(review): despite the comment above, both overflow paths in this
// implementation return false — confirm callers do not rely on a 'true'
// retry signal here.
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // No enough memory
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    // Offset of this function's buffer within the whole code region;
    // cached relocation entries below are stored relative to the region
    // base rather than to this function.
    ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      // Skip relocations the target resolves by itself.
      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          // Currently, we are not caching them. But since Android
          // system is using prelink, it is not a problem.

          // Cache the relocation result address
          mCachingRelocations.push_back(
            oBCCRelocEntry(MR.getRelocationType(),
                           MR.getMachineCodeOffset() + BufferOffset,
                           ResultPtr));
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    // Apply all resolved relocations to the emitted code in one pass.
    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // Now that we've succeeded in emitting the function.
  mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
  // Detach from the buffer; the next startFunction() sets it up again.
  BufferBegin = CurBufferPtr = 0;

  // NOTE(review): if the function has no name, the EmittedFuncEntry is
  // dropped here without being stored or freed — confirm this cannot leak.
  if (F.getFunction()->hasName())
    mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
  mpCurEmitFunction = NULL;

  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  // If a lazy stub was emitted for this function earlier, rewrite it to
  // point at the freshly emitted code.
  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}
1408
1409
// Begin emitting a stub of StubSize bytes for global value GV into newly
// allocated stub memory, temporarily retargeting the inherited
// BufferBegin/BufferEnd/CurBufferPtr; finishGVStub() restores them.
void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
                              unsigned Alignment) {
  // Save the current emission buffer so it can be restored afterwards.
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
                                                      Alignment);
  // The extra +1 leaves one spare byte so that emitting exactly StubSize
  // bytes never makes CurBufferPtr equal BufferEnd — that equality is the
  // buffer-overflow condition (asserted in finishGVStub()).
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}
1422
1423
// Begin re-emitting a stub of StubSize bytes into the caller-provided
// Buffer (used to rewrite an existing stub in place), temporarily
// retargeting the inherited buffer pointers; finishGVStub() restores them.
void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
  // Save the current emission buffer so it can be restored afterwards.
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
  // +1 spare byte: emitting exactly StubSize bytes must not trip the
  // CurBufferPtr == BufferEnd overflow condition.
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}
1434
1435
// End a stub emission started by startGVStub(), restoring the saved
// function-emission buffer state.
void CodeEmitter::finishGVStub() {
  // CurBufferPtr == BufferEnd is the overflow condition; startGVStub()
  // allocated one spare byte, so a fully written stub still passes.
  assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");

  // restore the buffer state saved by startGVStub()
  BufferBegin = mpSavedBufferBegin;
  BufferEnd = mpSavedBufferEnd;
  CurBufferPtr = mpSavedCurBufferPtr;
}
1444
1445
1446// Allocates and fills storage for an indirect GlobalValue, and returns the
1447// address.
1448void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
1449 const uint8_t *Buffer, size_t Size,
1450 unsigned Alignment) {
1451 uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
1452 memcpy(IndGV, Buffer, Size);
1453 return IndGV;
1454}
1455
1456
1457// Allocate memory for a global. Unlike allocateSpace, this method does not
1458// allocate memory in the current output buffer, because a global may live
1459// longer than the current function.
1460void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1461 // Delegate this call through the memory manager.
1462 return mpMemMgr->allocateGlobal(Size, Alignment);
1463}
1464
1465
1466// This should be called by the target when a new basic block is about to be
1467// emitted. This way the MCE knows where the start of the block is, and can
1468// implement getMachineBasicBlockAddress.
1469void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
1470 if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
1471 mMBBLocations.resize((MBB->getNumber() + 1) * 2);
1472 mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
1473 return;
1474}
1475
1476
1477// Return the address of the jump table with index @Index in the function
1478// that last called initJumpTableInfo.
1479uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
1480 const std::vector<llvm::MachineJumpTableEntry> &JT =
1481 mpJumpTable->getJumpTables();
1482
1483 assert((Index < JT.size()) && "Invalid jump table index!");
1484
1485 unsigned int Offset = 0;
1486 unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);
1487
1488 for (unsigned i = 0; i < Index; i++)
1489 Offset += JT[i].MBBs.size();
1490 Offset *= EntrySize;
1491
1492 return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
1493}
1494
1495
1496// Return the address of the specified MachineBasicBlock, only usable after
1497// the label for the MBB has been emitted.
1498uintptr_t CodeEmitter::getMachineBasicBlockAddress(
1499 llvm::MachineBasicBlock *MBB) const {
1500 assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
1501 mMBBLocations[MBB->getNumber()] &&
1502 "MBB not emitted!");
1503 return mMBBLocations[MBB->getNumber()];
1504}
1505
1506
1507void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
1508 // Get the empty stub we generated earlier.
1509 void *Stub;
1510 std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
1511 if (I != PendingFunctions.end())
1512 Stub = mFunctionToLazyStubMap[F];
1513 else
1514 return;
1515
1516 void *Addr = GetPointerToGlobalIfAvailable(F);
1517
1518 assert(Addr != Stub &&
1519 "Function must have non-stub address to be updated.");
1520
1521 // Tell the target jit info to rewrite the stub at the specified address,
1522 // rather than creating a new one.
1523 llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
1524 startGVStub(Stub, SL.Size);
1525 mpTJI->emitFunctionStub(F, Addr, *this);
1526 finishGVStub();
1527
1528 Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
1529 SL.Size, true);
1530
1531 PendingFunctions.erase(I);
1532}
1533
1534
Loganbce48b92010-11-27 16:44:24 +08001535void *CodeEmitter::lookup(const llvm::StringRef &Name) {
1536 EmittedFunctionsMapTy::const_iterator
1537 I = mEmittedFunctions.find(Name.str());
1538
1539 return (I == mEmittedFunctions.end()) ? NULL : I->second->Code;
1540}
1541
1542
Logan28325bf2010-11-26 23:27:41 +08001543void CodeEmitter::getFunctionNames(BCCsizei *actualFunctionCount,
1544 BCCsizei maxFunctionCount,
1545 BCCchar **functions) {
1546 int functionCount = mEmittedFunctions.size();
1547
1548 if (actualFunctionCount)
1549 *actualFunctionCount = functionCount;
1550 if (functionCount > maxFunctionCount)
1551 functionCount = maxFunctionCount;
1552 if (functions)
1553 for (EmittedFunctionsMapTy::const_iterator
1554 I = mEmittedFunctions.begin(), E = mEmittedFunctions.end();
1555 I != E && (functionCount > 0); I++, functionCount--) {
1556 *functions++ = const_cast<BCCchar*>(I->first.c_str());
1557 }
1558}
1559
1560
1561void CodeEmitter::getFunctionBinary(BCCchar *label,
1562 BCCvoid **base,
1563 BCCsizei *length) {
1564 EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
1565 if (I == mEmittedFunctions.end()) {
1566 *base = NULL;
1567 *length = 0;
1568 } else {
1569 *base = I->second->Code;
1570 *length = I->second->Size;
1571 }
1572}
1573
1574} // namespace bcc