/*
 * Copyright 2010, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CodeEmitter.h"

#include "CodeMemoryManager.h"
#include "EmittedFuncInfo.h"
#include "Runtime.h"

#include <bcc/bcc.h>
#include <bcc/bcc_cache.h>

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/JITCodeEmitter.h"

#include "llvm/ExecutionEngine/GenericValue.h"

#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

#if defined(USE_DISASSEMBLER)
#include "llvm/Support/MemoryObject.h"
#endif

#include "llvm/System/Host.h"

#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetJITInfo.h"

#include "llvm/Constant.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalValue.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"

#include <algorithm>
#include <vector>
#include <set>
#include <string>

#include <stddef.h>

namespace {

#if defined(USE_DISASSEMBLER)
class BufferMemoryObject : public llvm::MemoryObject {
 private:
  const uint8_t *mBytes;
  uint64_t mLength;

 public:
  BufferMemoryObject(const uint8_t *Bytes, uint64_t Length)
      : mBytes(Bytes), mLength(Length) {
  }

  virtual uint64_t getBase() const { return 0; }
  virtual uint64_t getExtent() const { return mLength; }

  virtual int readByte(uint64_t Addr, uint8_t *Byte) const {
    // Note: Addr == getExtent() is already one past the end of the buffer.
    if (Addr >= getExtent())
      return -1;
    *Byte = mBytes[Addr];
    return 0;
  }
};
#endif

}  // namespace anonymous

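// A minimal sketch (illustrative only; DisAsm, FnStart and FnLength are
// hypothetical names) of how BufferMemoryObject is consumed by
// CodeEmitter::Disassemble() further below:
//
//   BufferMemoryObject MemObj(FnStart, FnLength);
//   llvm::MCInst Inst;
//   uint64_t Size = 0;
//   for (uint64_t Index = 0; Index < FnLength; Index += Size)
//     if (!DisAsm->getInstruction(Inst, Size, MemObj, Index, llvm::nulls()))
//       break;  // undecodable byte; the real loop below skips it instead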

namespace bcc {

// Takes ownership of @pMemMgr.
CodeEmitter::CodeEmitter(CodeMemoryManager *pMemMgr)
    : mpMemMgr(pMemMgr),
      mpTarget(NULL),
      mpTJI(NULL),
      mpTD(NULL),
      mpCurEmitFunction(NULL),
      mpConstantPool(NULL),
      mpJumpTable(NULL),
      mpMMI(NULL),
#if defined(USE_DISASSEMBLER)
      mpAsmInfo(NULL),
      mpDisassmbler(NULL),
      mpIP(NULL),
#endif
      mpSymbolLookupFn(NULL),
      mpSymbolLookupContext(NULL) {
}


CodeEmitter::~CodeEmitter() {
#if defined(USE_DISASSEMBLER)
  delete mpAsmInfo;
  delete mpDisassmbler;
  delete mpIP;
#endif
}

// Once you have finished compiling a translation unit, you can call this
// function to recycle the memory that is only needed at compilation time
// (it is not needed at runtime).
//
// NOTE: You should not call this function until the code-gen passes for a
//       given module are done. Otherwise, the result is undefined and may
//       crash the system.
void CodeEmitter::releaseUnnecessary() {
  mMBBLocations.clear();
  mLabelLocations.clear();
  mGlobalAddressMap.clear();
  mFunctionToLazyStubMap.clear();
  GlobalToIndirectSymMap.clear();
  ExternalFnToStubMap.clear();
  PendingFunctions.clear();
}

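// A minimal usage sketch (hypothetical caller code, not part of this file):
// the owner of a CodeEmitter would typically drive it in this order --
//
//   emitter->setTargetMachine(TM);   // before running the code-gen passes
//   // ... passes invoke startFunction()/finishFunction() per function ...
//   emitter->releaseUnnecessary();   // once code-gen for the module is done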
void CodeEmitter::reset() {
  releaseUnnecessary();

  mpSymbolLookupFn = NULL;
  mpSymbolLookupContext = NULL;

  mpTJI = NULL;
  mpTD = NULL;

  for (EmittedFunctionsMapTy::iterator I = mEmittedFunctions.begin(),
          E = mEmittedFunctions.end();
       I != E;
       I++)
    if (I->second != NULL)
      delete I->second;
  mEmittedFunctions.clear();

  mpMemMgr->reset();
}

void *CodeEmitter::UpdateGlobalMapping(const llvm::GlobalValue *GV,
                                       void *Addr) {
  if (Addr == NULL) {
    // Removing mapping
    GlobalAddressMapTy::iterator I = mGlobalAddressMap.find(GV);
    void *OldVal;

    if (I == mGlobalAddressMap.end()) {
      OldVal = NULL;
    } else {
      OldVal = I->second;
      mGlobalAddressMap.erase(I);
    }

    return OldVal;
  }

  void *&CurVal = mGlobalAddressMap[GV];
  void *OldVal = CurVal;

  CurVal = Addr;

  return OldVal;
}

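// Example of the behavior above: UpdateGlobalMapping(F, Addr) installs Addr
// as F's address and returns whatever was mapped before (or NULL), while
// UpdateGlobalMapping(F, NULL) erases F's entry and returns the old address,
// so callers can tell whether a mapping previously existed.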
unsigned int CodeEmitter::GetConstantPoolSizeInBytes(
    llvm::MachineConstantPool *MCP) {
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return 0;

  unsigned int Size = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    unsigned int AlignMask = CPE.getAlignment() - 1;
    Size = (Size + AlignMask) & ~AlignMask;
    const llvm::Type *Ty = CPE.getType();
    Size += mpTD->getTypeAllocSize(Ty);
  }

  return Size;
}

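// Worked example for the rounding above: with two pool entries -- a 4-byte
// i32 (align 4) followed by an 8-byte double (align 8) -- the loop computes
//   Size = (0 + 3) & ~3 = 0,  Size += 4  ->  4
//   Size = (4 + 7) & ~7 = 8,  Size += 8  -> 16
// i.e. padding is inserted so each entry starts at its required alignment.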
// This function converts a Constant* into a GenericValue. The interesting
// part is if C is a ConstantExpr.
void CodeEmitter::GetConstantValue(const llvm::Constant *C,
                                   llvm::GenericValue &Result) {
  if (C->getValueID() == llvm::Value::UndefValueVal)
    return;
  else if (C->getValueID() == llvm::Value::ConstantExprVal) {
    const llvm::ConstantExpr *CE = (llvm::ConstantExpr*) C;
    const llvm::Constant *Op0 = CE->getOperand(0);

    switch (CE->getOpcode()) {
      case llvm::Instruction::GetElementPtr: {
        // Compute the index
        llvm::SmallVector<llvm::Value*, 8> Indices(CE->op_begin() + 1,
                                                   CE->op_end());
        uint64_t Offset = mpTD->getIndexedOffset(Op0->getType(),
                                                 &Indices[0],
                                                 Indices.size());

        GetConstantValue(Op0, Result);
        Result.PointerVal =
            static_cast<uint8_t*>(Result.PointerVal) + Offset;

        return;
      }
      case llvm::Instruction::Trunc: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.trunc(BitWidth);

        return;
      }
      case llvm::Instruction::ZExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.zext(BitWidth);

        return;
      }
      case llvm::Instruction::SExt: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        Result.IntVal = Result.IntVal.sext(BitWidth);

        return;
      }
      case llvm::Instruction::FPTrunc: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.FloatVal = static_cast<float>(Result.DoubleVal);
        return;
      }
      case llvm::Instruction::FPExt: {
        // TODO(all): fixme: long double
        GetConstantValue(Op0, Result);
        Result.DoubleVal = static_cast<double>(Result.FloatVal);
        return;
      }
      case llvm::Instruction::UIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.roundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.roundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               false,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      case llvm::Instruction::SIToFP: {
        GetConstantValue(Op0, Result);
        if (CE->getType()->isFloatTy())
          Result.FloatVal =
              static_cast<float>(Result.IntVal.signedRoundToDouble());
        else if (CE->getType()->isDoubleTy())
          Result.DoubleVal = Result.IntVal.signedRoundToDouble();
        else if (CE->getType()->isX86_FP80Ty()) {
          const uint64_t zero[] = { 0, 0 };
          llvm::APFloat apf = llvm::APFloat(llvm::APInt(80, 2, zero));
          apf.convertFromAPInt(Result.IntVal,
                               true,
                               llvm::APFloat::rmNearestTiesToEven);
          Result.IntVal = apf.bitcastToAPInt();
        }
        return;
      }
      // double->APInt conversion handles sign
      case llvm::Instruction::FPToUI:
      case llvm::Instruction::FPToSI: {
        uint32_t BitWidth =
            llvm::cast<llvm::IntegerType>(CE->getType())->getBitWidth();

        GetConstantValue(Op0, Result);
        if (Op0->getType()->isFloatTy())
          Result.IntVal =
              llvm::APIntOps::RoundFloatToAPInt(Result.FloatVal, BitWidth);
        else if (Op0->getType()->isDoubleTy())
          Result.IntVal =
              llvm::APIntOps::RoundDoubleToAPInt(Result.DoubleVal,
                                                 BitWidth);
        else if (Op0->getType()->isX86_FP80Ty()) {
          llvm::APFloat apf = llvm::APFloat(Result.IntVal);
          uint64_t V;
          bool Ignored;
          apf.convertToInteger(&V,
                               BitWidth,
                               CE->getOpcode() == llvm::Instruction::FPToSI,
                               llvm::APFloat::rmTowardZero,
                               &Ignored);
          Result.IntVal = V;  // endian?
        }
        return;
      }
      case llvm::Instruction::PtrToInt: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        Result.IntVal =
            llvm::APInt(PtrWidth, uintptr_t(Result.PointerVal));

        return;
      }
      case llvm::Instruction::IntToPtr: {
        uint32_t PtrWidth = mpTD->getPointerSizeInBits();

        GetConstantValue(Op0, Result);
        if (PtrWidth != Result.IntVal.getBitWidth())
          Result.IntVal = Result.IntVal.zextOrTrunc(PtrWidth);
        assert(Result.IntVal.getBitWidth() <= 64 && "Bad pointer width");

        Result.PointerVal =
            llvm::PointerTy(
                static_cast<uintptr_t>(Result.IntVal.getZExtValue()));

        return;
      }
      case llvm::Instruction::BitCast: {
        GetConstantValue(Op0, Result);
        const llvm::Type *DestTy = CE->getType();

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            assert(DestTy->isFloatingPointTy() && "invalid bitcast");
            if (DestTy->isFloatTy())
              Result.FloatVal = Result.IntVal.bitsToFloat();
            else if (DestTy->isDoubleTy())
              Result.DoubleVal = Result.IntVal.bitsToDouble();
            break;
          }
          case llvm::Type::FloatTyID: {
            assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
            Result.IntVal.floatToBits(Result.FloatVal);
            break;
          }
          case llvm::Type::DoubleTyID: {
            assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
            Result.IntVal.doubleToBits(Result.DoubleVal);
            break;
          }
          case llvm::Type::PointerTyID: {
            assert(DestTy->isPointerTy() && "Invalid bitcast");
            break;  // getConstantValue(Op0) above already converted it
          }
          default: {
            llvm_unreachable("Invalid bitcast operand");
          }
        }
        return;
      }
      case llvm::Instruction::Add:
      case llvm::Instruction::FAdd:
      case llvm::Instruction::Sub:
      case llvm::Instruction::FSub:
      case llvm::Instruction::Mul:
      case llvm::Instruction::FMul:
      case llvm::Instruction::UDiv:
      case llvm::Instruction::SDiv:
      case llvm::Instruction::URem:
      case llvm::Instruction::SRem:
      case llvm::Instruction::And:
      case llvm::Instruction::Or:
      case llvm::Instruction::Xor: {
        llvm::GenericValue LHS, RHS;
        GetConstantValue(Op0, LHS);
        GetConstantValue(CE->getOperand(1), RHS);

        switch (Op0->getType()->getTypeID()) {
          case llvm::Type::IntegerTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::Add: {
                Result.IntVal = LHS.IntVal + RHS.IntVal;
                break;
              }
              case llvm::Instruction::Sub: {
                Result.IntVal = LHS.IntVal - RHS.IntVal;
                break;
              }
              case llvm::Instruction::Mul: {
                Result.IntVal = LHS.IntVal * RHS.IntVal;
                break;
              }
              case llvm::Instruction::UDiv: {
                Result.IntVal = LHS.IntVal.udiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SDiv: {
                Result.IntVal = LHS.IntVal.sdiv(RHS.IntVal);
                break;
              }
              case llvm::Instruction::URem: {
                Result.IntVal = LHS.IntVal.urem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::SRem: {
                Result.IntVal = LHS.IntVal.srem(RHS.IntVal);
                break;
              }
              case llvm::Instruction::And: {
                Result.IntVal = LHS.IntVal & RHS.IntVal;
                break;
              }
              case llvm::Instruction::Or: {
                Result.IntVal = LHS.IntVal | RHS.IntVal;
                break;
              }
              case llvm::Instruction::Xor: {
                Result.IntVal = LHS.IntVal ^ RHS.IntVal;
                break;
              }
              default: {
                llvm_unreachable("Invalid integer opcode");
              }
            }
            break;
          }
          case llvm::Type::FloatTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.FloatVal = LHS.FloatVal + RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.FloatVal = LHS.FloatVal - RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.FloatVal = LHS.FloatVal * RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.FloatVal = LHS.FloatVal / RHS.FloatVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.FloatVal = ::fmodf(LHS.FloatVal, RHS.FloatVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid float opcode");
              }
            }
            break;
          }
          case llvm::Type::DoubleTyID: {
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                Result.DoubleVal = LHS.DoubleVal + RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FSub: {
                Result.DoubleVal = LHS.DoubleVal - RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FMul: {
                Result.DoubleVal = LHS.DoubleVal * RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FDiv: {
                Result.DoubleVal = LHS.DoubleVal / RHS.DoubleVal;
                break;
              }
              case llvm::Instruction::FRem: {
                Result.DoubleVal = ::fmod(LHS.DoubleVal, RHS.DoubleVal);
                break;
              }
              default: {
                llvm_unreachable("Invalid double opcode");
              }
            }
            break;
          }
          case llvm::Type::X86_FP80TyID:
          case llvm::Type::PPC_FP128TyID:
          case llvm::Type::FP128TyID: {
            llvm::APFloat apfLHS = llvm::APFloat(LHS.IntVal);
            switch (CE->getOpcode()) {
              case llvm::Instruction::FAdd: {
                apfLHS.add(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FSub: {
                apfLHS.subtract(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FMul: {
                apfLHS.multiply(llvm::APFloat(RHS.IntVal),
                                llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FDiv: {
                apfLHS.divide(llvm::APFloat(RHS.IntVal),
                              llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              case llvm::Instruction::FRem: {
                apfLHS.mod(llvm::APFloat(RHS.IntVal),
                           llvm::APFloat::rmNearestTiesToEven);
                break;
              }
              default: {
                llvm_unreachable("Invalid long double opcode");
              }
            }
            Result.IntVal = apfLHS.bitcastToAPInt();
            break;
          }
          default: {
            llvm_unreachable("Bad add type!");
          }
        }  // End switch (Op0->getType()->getTypeID())
        return;
      }
      default: {
        break;
      }
    }  // End switch (CE->getOpcode())

    std::string msg;
    llvm::raw_string_ostream Msg(msg);
    Msg << "ConstantExpr not handled: " << *CE;
    llvm::report_fatal_error(Msg.str());
  }  // C->getValueID() == llvm::Value::ConstantExprVal

  switch (C->getType()->getTypeID()) {
    case llvm::Type::FloatTyID: {
      Result.FloatVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToFloat();
      break;
    }
    case llvm::Type::DoubleTyID: {
      Result.DoubleVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().convertToDouble();
      break;
    }
    case llvm::Type::X86_FP80TyID:
    case llvm::Type::FP128TyID:
    case llvm::Type::PPC_FP128TyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantFP>(C)->getValueAPF().bitcastToAPInt();
      break;
    }
    case llvm::Type::IntegerTyID: {
      Result.IntVal =
          llvm::cast<llvm::ConstantInt>(C)->getValue();
      break;
    }
    case llvm::Type::PointerTyID: {
      switch (C->getValueID()) {
        case llvm::Value::ConstantPointerNullVal: {
          Result.PointerVal = NULL;
          break;
        }
        case llvm::Value::FunctionVal: {
          const llvm::Function *F = static_cast<const llvm::Function*>(C);
          Result.PointerVal =
              GetPointerToFunctionOrStub(const_cast<llvm::Function*>(F));
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          const llvm::GlobalVariable *GV =
              static_cast<const llvm::GlobalVariable*>(C);
          Result.PointerVal =
              GetOrEmitGlobalVariable(const_cast<llvm::GlobalVariable*>(GV));
          break;
        }
        case llvm::Value::BlockAddressVal: {
          assert(false && "JIT does not support address-of-label yet!");
        }
        default: {
          llvm_unreachable("Unknown constant pointer type!");
        }
      }
      break;
    }
    default: {
      std::string msg;
      llvm::raw_string_ostream Msg(msg);
      Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
      llvm::report_fatal_error(Msg.str());
      break;
    }
  }
  return;
}

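// Worked example (illustrative only) of the GetElementPtr case above: for a
// constant expression such as
//   getelementptr ([4 x i32]* @tab, i32 0, i32 2)
// the recursive call first resolves @tab to its emitted address, and
// mpTD->getIndexedOffset(...) then contributes 2 * sizeof(i32) = 8 bytes,
// which is added to Result.PointerVal.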
// Stores the data in @Val of type @Ty at address @Addr.
void CodeEmitter::StoreValueToMemory(const llvm::GenericValue &Val,
                                     void *Addr,
                                     const llvm::Type *Ty) {
  const unsigned int StoreBytes = mpTD->getTypeStoreSize(Ty);

  switch (Ty->getTypeID()) {
    case llvm::Type::IntegerTyID: {
      const llvm::APInt &IntVal = Val.IntVal;
      assert(((IntVal.getBitWidth() + 7) / 8 >= StoreBytes) &&
             "Integer too small!");

      const uint8_t *Src =
          reinterpret_cast<const uint8_t*>(IntVal.getRawData());

      if (llvm::sys::isLittleEndianHost()) {
        // Little-endian host - the source is ordered from LSB to MSB.
        // Order the destination from LSB to MSB: Do a straight copy.
        memcpy(Addr, Src, StoreBytes);
      } else {
        // Big-endian host - the source is an array of 64 bit words
        // ordered from LSW to MSW.
        //
        // Each word is ordered from MSB to LSB.
        //
        // Order the destination from MSB to LSB:
        //  Reverse the word order, but not the bytes in a word.
        unsigned int i = StoreBytes;
        while (i > sizeof(uint64_t)) {
          i -= sizeof(uint64_t);
          ::memcpy(reinterpret_cast<uint8_t*>(Addr) + i,
                   Src,
                   sizeof(uint64_t));
          Src += sizeof(uint64_t);
        }
        ::memcpy(Addr, Src + sizeof(uint64_t) - i, i);
      }
      break;
    }
    case llvm::Type::FloatTyID: {
      *reinterpret_cast<float*>(Addr) = Val.FloatVal;
      break;
    }
    case llvm::Type::DoubleTyID: {
      *reinterpret_cast<double*>(Addr) = Val.DoubleVal;
      break;
    }
    case llvm::Type::X86_FP80TyID: {
      memcpy(Addr, Val.IntVal.getRawData(), 10);
      break;
    }
    case llvm::Type::PointerTyID: {
      // Ensure 64 bit target pointers are fully initialized on 32 bit
      // hosts.
      if (StoreBytes != sizeof(llvm::PointerTy))
        memset(Addr, 0, StoreBytes);
      *((llvm::PointerTy*) Addr) = Val.PointerVal;
      break;
    }
    default: {
      break;
    }
  }

  if (llvm::sys::isLittleEndianHost() != mpTD->isLittleEndian())
    std::reverse(reinterpret_cast<uint8_t*>(Addr),
                 reinterpret_cast<uint8_t*>(Addr) + StoreBytes);

  return;
}

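// Worked example for the endianness handling above (illustrative only):
// storing the i32 value 0x11223344 on a little-endian host writes the bytes
// {0x44, 0x33, 0x22, 0x11} at Addr; if the target's endianness differs from
// the host's, the final std::reverse() flips those StoreBytes in place so
// the emitted image matches the target byte order.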
// Recursively initializes the memory at @Addr with the constant @C.
void CodeEmitter::InitializeConstantToMemory(const llvm::Constant *C,
                                             void *Addr) {
  switch (C->getValueID()) {
    case llvm::Value::UndefValueVal: {
      // Nothing to do
      break;
    }
    case llvm::Value::ConstantVectorVal: {
      // dynamic cast may hurt performance
      const llvm::ConstantVector *CP = (llvm::ConstantVector*) C;

      unsigned int ElementSize =
          mpTD->getTypeAllocSize(CP->getType()->getElementType());

      for (int i = 0, e = CP->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CP->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantAggregateZeroVal: {
      memset(Addr, 0, (size_t) mpTD->getTypeAllocSize(C->getType()));
      break;
    }
    case llvm::Value::ConstantArrayVal: {
      const llvm::ConstantArray *CPA = (llvm::ConstantArray*) C;
      unsigned int ElementSize =
          mpTD->getTypeAllocSize(CPA->getType()->getElementType());

      for (int i = 0, e = CPA->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPA->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + i * ElementSize);
      break;
    }
    case llvm::Value::ConstantStructVal: {
      const llvm::ConstantStruct *CPS =
          static_cast<const llvm::ConstantStruct*>(C);
      const llvm::StructLayout *SL = mpTD->getStructLayout(
          llvm::cast<llvm::StructType>(CPS->getType()));

      for (int i = 0, e = CPS->getNumOperands(); i != e; i++)
        InitializeConstantToMemory(
            CPS->getOperand(i),
            reinterpret_cast<uint8_t*>(Addr) + SL->getElementOffset(i));
      break;
    }
    default: {
      if (C->getType()->isFirstClassType()) {
        llvm::GenericValue Val;
        GetConstantValue(C, Val);
        StoreValueToMemory(Val, Addr, C->getType());
      } else {
        llvm_unreachable("Unknown constant type to initialize memory "
                         "with!");
      }
      break;
    }
  }
  return;
}

void CodeEmitter::emitConstantPool(llvm::MachineConstantPool *MCP) {
  if (mpTJI->hasCustomConstantPool())
    return;

  // Constant pool address resolution is handled by the target itself in ARM
  // (TargetJITInfo::hasCustomConstantPool() returns true).
#if !defined(PROVIDE_ARM_CODEGEN)
  const std::vector<llvm::MachineConstantPoolEntry> &Constants =
      MCP->getConstants();

  if (Constants.empty())
    return;

  unsigned Size = GetConstantPoolSizeInBytes(MCP);
  unsigned Align = MCP->getConstantPoolAlignment();

  mpConstantPoolBase = allocateSpace(Size, Align);
  mpConstantPool = MCP;

  if (mpConstantPoolBase == NULL)
    return;  // out of memory

  unsigned Offset = 0;
  for (int i = 0, e = Constants.size(); i != e; i++) {
    llvm::MachineConstantPoolEntry CPE = Constants[i];
    unsigned AlignMask = CPE.getAlignment() - 1;
    Offset = (Offset + AlignMask) & ~AlignMask;

    uintptr_t CAddr = (uintptr_t) mpConstantPoolBase + Offset;
    mConstPoolAddresses.push_back(CAddr);

    if (CPE.isMachineConstantPoolEntry())
      llvm::report_fatal_error
          ("Initialize memory with machine specific constant pool"
           " entry has not been implemented!");

    InitializeConstantToMemory(CPE.Val.ConstVal, (void*) CAddr);

    const llvm::Type *Ty = CPE.Val.ConstVal->getType();
    Offset += mpTD->getTypeAllocSize(Ty);
  }
#endif
  return;
}

void CodeEmitter::initJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty())
    return;

  unsigned NumEntries = 0;
  for (int i = 0, e = JT.size(); i != e; i++)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize(*mpTD);

  mpJumpTable = MJTI;
  mpJumpTableBase = allocateSpace(NumEntries * EntrySize,
                                  MJTI->getEntryAlignment(*mpTD));

  return;
}

void CodeEmitter::emitJumpTableInfo(llvm::MachineJumpTableInfo *MJTI) {
  if (mpTJI->hasCustomJumpTables())
    return;

  const std::vector<llvm::MachineJumpTableEntry> &JT =
      MJTI->getJumpTables();
  if (JT.empty() || mpJumpTableBase == 0)
    return;

  assert(llvm::TargetMachine::getRelocationModel() == llvm::Reloc::Static &&
         (MJTI->getEntrySize(*mpTD) == sizeof(mpTD /* a pointer type */)) &&
         "Cross JIT'ing?");

  // For each jump table, map each target in the jump table to the
  // address of an emitted MachineBasicBlock.
  intptr_t *SlotPtr = reinterpret_cast<intptr_t*>(mpJumpTableBase);
  for (int i = 0, ie = JT.size(); i != ie; i++) {
    const std::vector<llvm::MachineBasicBlock*> &MBBs = JT[i].MBBs;
    // Store the address of the basic block for this jump table slot in the
    // memory we allocated for the jump table in 'initJumpTableInfo'
    for (int j = 0, je = MBBs.size(); j != je; j++)
      *SlotPtr++ = getMachineBasicBlockAddress(MBBs[j]);
  }
}

void *CodeEmitter::GetPointerToGlobal(llvm::GlobalValue *V,
                                      void *Reference,
                                      bool MayNeedFarStub) {
  switch (V->getValueID()) {
    case llvm::Value::FunctionVal: {
      llvm::Function *F = (llvm::Function*) V;

      // If we have code, go ahead and return that.
      if (void *ResultPtr = GetPointerToGlobalIfAvailable(F))
        return ResultPtr;

      if (void *FnStub = GetLazyFunctionStubIfAvailable(F))
        // Return the function stub if it's already created. We do this
        // first so that we're returning the same address for the function
        // as any previous call.
        //
        // TODO(llvm.org): Yes, this is wrong. The lazy stub isn't
        //                 guaranteed to be close enough to call.
        return FnStub;

      // If we know the target can handle arbitrary-distance calls, try to
      // return a direct pointer.
      if (!MayNeedFarStub) {
        // The x86_64 architecture may hit the bug
        // http://llvm.org/bugs/show_bug.cgi?id=5201, which generates a
        // "call" instruction instead of "callq".
        //
        // Once the real address of the stub no longer fits in 32 bits, the
        // replacement is truncated to 32 bits, which is a serious problem.
#if !defined(__x86_64__)
        // If this is an external function pointer, we can force the JIT
        // to 'compile' it, which really just adds it to the map.
        //
        // AbortOnFailure is false so that later calls to mpTJI->relocate()
        // can proceed without aborting (needed for caching).
        if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
          return GetPointerToFunction(F, /* AbortOnFailure = */false);
        }
#endif
      }

      // Otherwise, we may need to emit a stub, and, conservatively, we
      // always do so.
      return GetLazyFunctionStub(F);
      break;
    }
    case llvm::Value::GlobalVariableVal: {
      return GetOrEmitGlobalVariable((llvm::GlobalVariable*) V);
      break;
    }
    case llvm::Value::GlobalAliasVal: {
      llvm::GlobalAlias *GA = (llvm::GlobalAlias*) V;
      const llvm::GlobalValue *GV = GA->resolveAliasedGlobal(false);

      switch (GV->getValueID()) {
        case llvm::Value::FunctionVal: {
          // TODO(all): Is there any possibility that the function is not
          //            code-gen'd?
          //
          // AbortOnFailure is false so that later calls to
          // mpTJI->relocate() can proceed without aborting (needed for
          // caching).
          return GetPointerToFunction(
              static_cast<const llvm::Function*>(GV),
              /* AbortOnFailure = */false);
          break;
        }
        case llvm::Value::GlobalVariableVal: {
          if (void *P = mGlobalAddressMap[GV])
            return P;

          llvm::GlobalVariable *GVar = (llvm::GlobalVariable*) GV;
          EmitGlobalVariable(GVar);

          return mGlobalAddressMap[GV];
          break;
        }
        case llvm::Value::GlobalAliasVal: {
          assert(false && "Alias should be resolved ultimately!");
        }
      }
      break;
    }
    default: {
      break;
    }
  }
  llvm_unreachable("Unknown type of global value!");
}

// If the specified function has been code-gen'd, return a pointer to the
// function. If not, compile it, or use a stub to implement lazy compilation
// if available.
void *CodeEmitter::GetPointerToFunctionOrStub(llvm::Function *F) {
  // If we have already code generated the function, just return the
  // address.
  if (void *Addr = GetPointerToGlobalIfAvailable(F))
    return Addr;

  // Get a stub if the target supports it.
  return GetLazyFunctionStub(F);
}

void *CodeEmitter::GetLazyFunctionStub(llvm::Function *F) {
  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = mFunctionToLazyStubMap[F];
  if (Stub)
    return Stub;

  // In any case, we should NOT resolve the function at runtime (even though
  // we are able to). Resolve it right now instead.
  void *Actual = NULL;
  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
    // AbortOnFailure is false so that later calls to mpTJI->relocate() can
    // proceed without aborting (needed for caching).
    Actual = GetPointerToFunction(F, /* AbortOnFailure = */false);
  }

  // Codegen a new stub, calling the actual address of the external
  // function, if it was resolved.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(F, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(F, Actual, *this);
  finishGVStub();

  // We really want the address of the stub in the GlobalAddressMap for the
  // JIT, not the address of the external function.
  UpdateGlobalMapping(F, Stub);

  if (!Actual)
    PendingFunctions.insert(F);
  else
    Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
                SL.Size, true);

  return Stub;
}

void *CodeEmitter::GetPointerToFunction(const llvm::Function *F,
                                        bool AbortOnFailure) {
  void *Addr = GetPointerToGlobalIfAvailable(F);
  if (Addr)
    return Addr;

  assert((F->isDeclaration() || F->hasAvailableExternallyLinkage()) &&
         "Internal error: only externally defined functions route here!");

  // Handle the failure resolution by ourselves.
  Addr = GetPointerToNamedSymbol(F->getName().str().c_str(),
                                 /* AbortOnFailure = */ false);

  // If we resolved the symbol to a null address (e.g., a weak external),
  // return a null pointer and let the application handle it.
  if (Addr == NULL) {
    if (AbortOnFailure)
      llvm::report_fatal_error("Could not resolve external function "
                               "address: " + F->getName());
    else
      return NULL;
  }

  AddGlobalMapping(F, Addr);

  return Addr;
}

void *CodeEmitter::GetPointerToNamedSymbol(const std::string &Name,
                                           bool AbortOnFailure) {
  if (void *Addr = FindRuntimeFunction(Name.c_str()))
    return Addr;

  if (mpSymbolLookupFn)
    if (void *Addr = mpSymbolLookupFn(mpSymbolLookupContext, Name.c_str()))
      return Addr;

  if (AbortOnFailure)
    llvm::report_fatal_error("Program used external symbol '" + Name +
                             "' which could not be resolved!");

  return NULL;
}

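// A minimal sketch (hypothetical names, not part of this file) of the kind
// of client-supplied lookup callback that ends up in mpSymbolLookupFn /
// mpSymbolLookupContext and is consulted by GetPointerToNamedSymbol() above:
//
//   static void *MySymbolLookup(void *Context, const char *Name) {
//     if (::strcmp(Name, "my_host_helper") == 0)
//       return reinterpret_cast<void*>(&my_host_helper);
//     return NULL;  // unresolved; the caller decides whether to abort
//   }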
// Return the address of the specified global variable, possibly emitting it
// to memory if needed. This is used by the Emitter.
void *CodeEmitter::GetOrEmitGlobalVariable(const llvm::GlobalVariable *GV) {
  void *Ptr = GetPointerToGlobalIfAvailable(GV);
  if (Ptr)
    return Ptr;

  if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage()) {
    // If the global is external, just remember the address.
    Ptr = GetPointerToNamedSymbol(GV->getName().str(), true);
    AddGlobalMapping(GV, Ptr);
  } else {
    // If the global hasn't been emitted to memory yet, allocate space and
    // emit it into memory.
    Ptr = GetMemoryForGV(GV);
    AddGlobalMapping(GV, Ptr);
    EmitGlobalVariable(GV);
  }

  return Ptr;
}

// This method abstracts memory allocation of a global variable so that the
// JIT can allocate thread-local variables depending on the target.
void *CodeEmitter::GetMemoryForGV(const llvm::GlobalVariable *GV) {
  void *Ptr;

  const llvm::Type *GlobalType = GV->getType()->getElementType();
  size_t S = mpTD->getTypeAllocSize(GlobalType);
  size_t A = mpTD->getPreferredAlignment(GV);

  if (GV->isThreadLocal()) {
    // We could support TLS with
    //
    //   Ptr = TJI.allocateThreadLocalMemory(S);
    //
    // but we choose not to for now (should we disable this in the
    // front-end, i.e., slang, instead?).
    llvm::report_fatal_error
        ("Compilation of Thread Local Storage (TLS) is disabled!");

  } else if (mpTJI->allocateSeparateGVMemory()) {
    if (A <= 8) {
      Ptr = malloc(S);
    } else {
      // Allocate (S + A) bytes of memory, then use an aligned pointer
      // within that space.
      Ptr = malloc(S + A);
      unsigned int MisAligned = ((intptr_t) Ptr & (A - 1));
      Ptr = reinterpret_cast<uint8_t*>(Ptr) +
                (MisAligned ? (A - MisAligned) : 0);
    }
  } else {
    Ptr = allocateGlobal(S, A);
  }

  return Ptr;
}

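// Worked example for the over-allocation path above (illustrative only):
// with S = 24 and A = 16, malloc(S + A) might return 0x1008; then
// MisAligned = 0x1008 & 0xF = 8, so the pointer is advanced by 16 - 8 = 8
// bytes to the next 16-byte boundary, 0x1010, which still leaves the full
// S bytes available inside the allocation.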
void CodeEmitter::EmitGlobalVariable(const llvm::GlobalVariable *GV) {
  void *GA = GetPointerToGlobalIfAvailable(GV);

  if (GV->isThreadLocal())
    llvm::report_fatal_error
        ("We don't support Thread Local Storage (TLS)!");

  if (GA == NULL) {
    // If it's not already specified, allocate memory for the global.
    GA = GetMemoryForGV(GV);
    AddGlobalMapping(GV, GA);
  }

  InitializeConstantToMemory(GV->getInitializer(), GA);

  // Statistics on global variables could be collected here.
  return;
}

void *CodeEmitter::GetPointerToGVIndirectSym(llvm::GlobalValue *V,
                                             void *Reference) {
  // Make sure GV is emitted first, and create a stub containing the fully
  // resolved address.
  void *GVAddress = GetPointerToGlobal(V, Reference, false);

  // If we already have a stub for this global variable, recycle it.
  void *&IndirectSym = GlobalToIndirectSymMap[V];
  // Otherwise, codegen a new indirect symbol.
  if (!IndirectSym)
    IndirectSym = mpTJI->emitGlobalValueIndirectSym(V, GVAddress, *this);

  return IndirectSym;
}

// Return a stub for the function at the specified address.
void *CodeEmitter::GetExternalFunctionStub(void *FnAddr) {
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub)
    return Stub;

  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(0, SL.Size, SL.Alignment);
  Stub = mpTJI->emitFunctionStub(0, FnAddr, *this);
  finishGVStub();

  return Stub;
}

void CodeEmitter::Disassemble(const llvm::StringRef &Name,
                              uint8_t *Start, size_t Length, bool IsStub) {

#if defined(USE_DISASSEMBLER)
  llvm::raw_ostream *OS;

#if defined(USE_DISASSEMBLER_FILE)
  std::string ErrorInfo;
  OS = new llvm::raw_fd_ostream("/data/local/tmp/out.S",
                                ErrorInfo,
                                llvm::raw_fd_ostream::F_Append);

  if (!ErrorInfo.empty()) {  // some errors occurred
    // LOGE("Error in creating disassembly file");
    delete OS;
    return;
  }
#else
  OS = &llvm::outs();
#endif

  *OS << "JIT: Disassembled code: " << Name << ((IsStub) ? " (stub)" : "")
      << "\n";

  if (mpAsmInfo == NULL)
    mpAsmInfo = mpTarget->createAsmInfo(Compiler::Triple);
  if (mpDisassmbler == NULL)
    mpDisassmbler = mpTarget->createMCDisassembler();
  if (mpIP == NULL)
    mpIP = mpTarget->createMCInstPrinter(mpAsmInfo->getAssemblerDialect(),
                                         *mpAsmInfo);

  const BufferMemoryObject *BufferMObj = new BufferMemoryObject(Start,
                                                                Length);
  uint64_t Size;
  uint64_t Index;

  for (Index = 0; Index < Length; Index += Size) {
    llvm::MCInst Inst;

    if (mpDisassmbler->getInstruction(Inst, Size, *BufferMObj, Index,
                                      /* REMOVED */ llvm::nulls())) {
      (*OS).indent(4)
           .write("0x", 2)
           .write_hex((uint32_t) Start + Index)
           .write(':');
      mpIP->printInst(&Inst, *OS);
      *OS << "\n";
    } else {
      if (Size == 0)
        Size = 1;  // skip illegible bytes
    }
  }

  *OS << "\n";
  delete BufferMObj;

#if defined(USE_DISASSEMBLER_FILE)
  // Flush and release the disassembly output file.
  ((llvm::raw_fd_ostream*)OS)->close();
  delete OS;
#endif

#endif  // defined(USE_DISASSEMBLER)
}

void CodeEmitter::setTargetMachine(llvm::TargetMachine &TM) {
  // Set Target
  mpTarget = &TM.getTarget();
  // Set TargetJITInfo
  mpTJI = TM.getJITInfo();
  // Set TargetData
  mpTD = TM.getTargetData();

  assert(!mpTJI->needsGOT() && "We don't support GOT needed target!");

  return;
}

// This callback is invoked when the specified function is about to be code
// generated. This initializes the BufferBegin/End/Ptr fields.
void CodeEmitter::startFunction(llvm::MachineFunction &F) {
  uintptr_t ActualSize = 0;

  mpMemMgr->setMemoryWritable();

  // BufferBegin, BufferEnd and CurBufferPtr are all inherited from class
  // MachineCodeEmitter, which is the super class of the class
  // JITCodeEmitter.
  //
  // BufferBegin/BufferEnd - Pointers to the start and end of the memory
  //                         allocated for this code buffer.
  //
  // CurBufferPtr - Pointer to the next byte of memory to fill when emitting
  //                code. This is guaranteed to be in the range
  //                [BufferBegin, BufferEnd]. If this pointer is at
  //                BufferEnd, it will never move due to code emission, and
  //                all code emission requests will be ignored (this is the
  //                buffer overflow condition).
  BufferBegin = CurBufferPtr =
      mpMemMgr->startFunctionBody(F.getFunction(), ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  if (mpCurEmitFunction == NULL)
    mpCurEmitFunction = new EmittedFuncInfo();
  mpCurEmitFunction->FunctionBody = BufferBegin;

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  UpdateGlobalMapping(F.getFunction(), CurBufferPtr);

  mpCurEmitFunction->Code = CurBufferPtr;

  mMBBLocations.clear();
}

// This callback is invoked when the specified function has finished code
// generation. If a buffer overflow has occurred, this method returns true
// (the callee is required to try again).
bool CodeEmitter::finishFunction(llvm::MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // Not enough memory
    mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    return false;
  }

  if (llvm::MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool
  // and other per-function data.
  uint8_t *FnStart =
      reinterpret_cast<uint8_t*>(
          GetPointerToGlobalIfAvailable(F.getFunction()));

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!mRelocations.empty()) {
    ptrdiff_t BufferOffset = BufferBegin - mpMemMgr->getCodeMemBase();

    // Resolve the relocations to concrete pointers.
    for (int i = 0, e = mRelocations.size(); i != e; i++) {
      llvm::MachineRelocation &MR = mRelocations[i];
      void *ResultPtr = NULL;

      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = GetPointerToNamedSymbol(MR.getExternalSymbol(), true);

          if (MR.mayNeedFarStub()) {
            ResultPtr = GetExternalFunctionStub(ResultPtr);
          }

        } else if (MR.isGlobalValue()) {
          ResultPtr = GetPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin
                                           + MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr =
              GetPointerToGVIndirectSym(
                  MR.getGlobalValue(),
                  BufferBegin + MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr =
              (void*) getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
              (void*) getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex() && "Unknown type of relocation");
          ResultPtr =
              (void*) getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        if (!MR.isExternalSymbol() || MR.mayNeedFarStub()) {
          // TODO(logan): Cache external symbol relocation entry.
          //              Currently, we are not caching them. But since the
          //              Android system is using prelink, it is not a
          //              problem.

          // Cache the relocation result address
          mCachingRelocations.push_back(
              oBCCRelocEntry(MR.getRelocationType(),
                             MR.getMachineCodeOffset() + BufferOffset,
                             ResultPtr));
        }

        MR.setResultPointer(ResultPtr);
      }
    }

    mpTJI->relocate(BufferBegin, &mRelocations[0], mRelocations.size(),
                    mpMemMgr->getGOTBase());
  }

  mpMemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  if (CurBufferPtr == BufferEnd)
    return false;

  // We have now succeeded in emitting the function; record its size.
  mpCurEmitFunction->Size = CurBufferPtr - BufferBegin;
  BufferBegin = CurBufferPtr = 0;

  if (F.getFunction()->hasName())
    mEmittedFunctions[F.getFunction()->getNameStr()] = mpCurEmitFunction;
  mpCurEmitFunction = NULL;

  mRelocations.clear();
  mConstPoolAddresses.clear();

  if (mpMMI)
    mpMMI->EndFunction();

  updateFunctionStub(F.getFunction());

  // Mark code region readable and executable if it's not so already.
  mpMemMgr->setMemoryExecutable();

  Disassemble(F.getFunction()->getName(), FnStart, FnEnd - FnStart, false);

  return false;
}

void CodeEmitter::startGVStub(const llvm::GlobalValue *GV, unsigned StubSize,
                              unsigned Alignment) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = mpMemMgr->allocateStub(GV, StubSize,
                                                      Alignment);
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


void CodeEmitter::startGVStub(void *Buffer, unsigned StubSize) {
  mpSavedBufferBegin = BufferBegin;
  mpSavedBufferEnd = BufferEnd;
  mpSavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = reinterpret_cast<uint8_t *>(Buffer);
  BufferEnd = BufferBegin + StubSize + 1;

  return;
}


void CodeEmitter::finishGVStub() {
  assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");

  // restore
  BufferBegin = mpSavedBufferBegin;
  BufferEnd = mpSavedBufferEnd;
  CurBufferPtr = mpSavedCurBufferPtr;
}

// Allocates and fills storage for an indirect GlobalValue, and returns the
// address.
void *CodeEmitter::allocIndirectGV(const llvm::GlobalValue *GV,
                                   const uint8_t *Buffer, size_t Size,
                                   unsigned Alignment) {
  uint8_t *IndGV = mpMemMgr->allocateStub(GV, Size, Alignment);
  memcpy(IndGV, Buffer, Size);
  return IndGV;
}


// Allocate memory for a global. Unlike allocateSpace, this method does not
// allocate memory in the current output buffer, because a global may live
// longer than the current function.
void *CodeEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
  // Delegate this call through the memory manager.
  return mpMemMgr->allocateGlobal(Size, Alignment);
}


// This should be called by the target when a new basic block is about to be
// emitted. This way the MCE knows where the start of the block is, and can
// implement getMachineBasicBlockAddress.
void CodeEmitter::StartMachineBasicBlock(llvm::MachineBasicBlock *MBB) {
  if (mMBBLocations.size() <= (unsigned) MBB->getNumber())
    mMBBLocations.resize((MBB->getNumber() + 1) * 2);
  mMBBLocations[MBB->getNumber()] = getCurrentPCValue();
  return;
}

// Return the address of the jump table with index @Index in the function
// that last called initJumpTableInfo.
uintptr_t CodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
  const std::vector<llvm::MachineJumpTableEntry> &JT =
      mpJumpTable->getJumpTables();

  assert((Index < JT.size()) && "Invalid jump table index!");

  unsigned int Offset = 0;
  unsigned int EntrySize = mpJumpTable->getEntrySize(*mpTD);

  for (unsigned i = 0; i < Index; i++)
    Offset += JT[i].MBBs.size();
  Offset *= EntrySize;

  return (uintptr_t)(reinterpret_cast<uint8_t*>(mpJumpTableBase) + Offset);
}

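// Worked example for the offset computation above: with two jump tables of
// 3 and 5 entries and a 4-byte entry size, getJumpTableEntryAddress(1)
// skips the 3 entries of table 0, so Offset = 3 * 4 = 12 bytes past
// mpJumpTableBase.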
// Return the address of the specified MachineBasicBlock, only usable after
// the label for the MBB has been emitted.
uintptr_t CodeEmitter::getMachineBasicBlockAddress(
    llvm::MachineBasicBlock *MBB) const {
  assert(mMBBLocations.size() > (unsigned) MBB->getNumber() &&
         mMBBLocations[MBB->getNumber()] &&
         "MBB not emitted!");
  return mMBBLocations[MBB->getNumber()];
}

void CodeEmitter::updateFunctionStub(const llvm::Function *F) {
  // Get the empty stub we generated earlier.
  void *Stub;
  std::set<const llvm::Function*>::iterator I = PendingFunctions.find(F);
  if (I != PendingFunctions.end())
    Stub = mFunctionToLazyStubMap[F];
  else
    return;

  void *Addr = GetPointerToGlobalIfAvailable(F);

  assert(Addr != Stub &&
         "Function must have non-stub address to be updated.");

  // Tell the target jit info to rewrite the stub at the specified address,
  // rather than creating a new one.
  llvm::TargetJITInfo::StubLayout SL = mpTJI->getStubLayout();
  startGVStub(Stub, SL.Size);
  mpTJI->emitFunctionStub(F, Addr, *this);
  finishGVStub();

  Disassemble(F->getName(), reinterpret_cast<uint8_t*>(Stub),
              SL.Size, true);

  PendingFunctions.erase(I);
}

void *CodeEmitter::lookup(const llvm::StringRef &Name) {
  EmittedFunctionsMapTy::const_iterator
      I = mEmittedFunctions.find(Name.str());

  return (I == mEmittedFunctions.end()) ? NULL : I->second->Code;
}

void CodeEmitter::getFunctionNames(BCCsizei *actualFunctionCount,
                                   BCCsizei maxFunctionCount,
                                   BCCchar **functions) {
  int functionCount = mEmittedFunctions.size();

  if (actualFunctionCount)
    *actualFunctionCount = functionCount;
  if (functionCount > maxFunctionCount)
    functionCount = maxFunctionCount;
  if (functions)
    for (EmittedFunctionsMapTy::const_iterator
            I = mEmittedFunctions.begin(), E = mEmittedFunctions.end();
         I != E && (functionCount > 0); I++, functionCount--) {
      *functions++ = const_cast<BCCchar*>(I->first.c_str());
    }
}


void CodeEmitter::getFunctionBinary(BCCchar *label,
                                    BCCvoid **base,
                                    BCCsizei *length) {
  EmittedFunctionsMapTy::const_iterator I = mEmittedFunctions.find(label);
  if (I == mEmittedFunctions.end()) {
    *base = NULL;
    *length = 0;
  } else {
    *base = I->second->Code;
    *length = I->second->Size;
  }
}

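// A minimal caller-side sketch (hypothetical variable names, not part of
// this file) of the query API above, assuming at least one function was
// emitted:
//
//   BCCsizei Count = 0;
//   emitter->getFunctionNames(&Count, 0, NULL);         // query the count
//   std::vector<BCCchar*> Names(Count);
//   emitter->getFunctionNames(NULL, Count, &Names[0]);  // fetch the names
//   BCCvoid *Base; BCCsizei Length;
//   emitter->getFunctionBinary(Names[0], &Base, &Length);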
}  // namespace bcc