Merge "Manage llvm::TargetMachine with smart pointer." into ics-mr1-plus-art
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index db7f2ba..2f95be2 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -42,102 +42,102 @@
/* Suppress optimization if corresponding bit set */
enum optControlVector {
- kLoadStoreElimination = 0,
- kLoadHoisting,
- kSuppressLoads,
- kNullCheckElimination,
- kPromoteRegs,
- kTrackLiveTemps,
- kSkipLargeMethodOptimization,
- kSafeOptimizations,
- kBBOpt,
- kMatch,
- kPromoteCompilerTemps,
+ kLoadStoreElimination = 0,
+ kLoadHoisting,
+ kSuppressLoads,
+ kNullCheckElimination,
+ kPromoteRegs,
+ kTrackLiveTemps,
+ kSkipLargeMethodOptimization,
+ kSafeOptimizations,
+ kBBOpt,
+ kMatch,
+ kPromoteCompilerTemps,
};
/* Type of allocation for memory tuning */
enum oatAllocKind {
- kAllocMisc,
- kAllocBB,
- kAllocLIR,
- kAllocMIR,
- kAllocDFInfo,
- kAllocGrowableList,
- kAllocGrowableBitMap,
- kAllocDalvikToSSAMap,
- kAllocDebugInfo,
- kAllocSuccessor,
- kAllocRegAlloc,
- kAllocData,
- kAllocPredecessors,
- kNumAllocKinds
+ kAllocMisc,
+ kAllocBB,
+ kAllocLIR,
+ kAllocMIR,
+ kAllocDFInfo,
+ kAllocGrowableList,
+ kAllocGrowableBitMap,
+ kAllocDalvikToSSAMap,
+ kAllocDebugInfo,
+ kAllocSuccessor,
+ kAllocRegAlloc,
+ kAllocData,
+ kAllocPredecessors,
+ kNumAllocKinds
};
/* Type of growable list for memory tuning */
enum oatListKind {
- kListMisc = 0,
- kListBlockList,
- kListSSAtoDalvikMap,
- kListDfsOrder,
- kListDfsPostOrder,
- kListDomPostOrderTraversal,
- kListThrowLaunchPads,
- kListSuspendLaunchPads,
- kListSwitchTables,
- kListFillArrayData,
- kListSuccessorBlocks,
- kListPredecessors,
- kNumListKinds
+ kListMisc = 0,
+ kListBlockList,
+ kListSSAtoDalvikMap,
+ kListDfsOrder,
+ kListDfsPostOrder,
+ kListDomPostOrderTraversal,
+ kListThrowLaunchPads,
+ kListSuspendLaunchPads,
+ kListSwitchTables,
+ kListFillArrayData,
+ kListSuccessorBlocks,
+ kListPredecessors,
+ kNumListKinds
};
/* Type of growable bitmap for memory tuning */
enum oatBitMapKind {
- kBitMapMisc = 0,
- kBitMapUse,
- kBitMapDef,
- kBitMapLiveIn,
- kBitMapBMatrix,
- kBitMapDominators,
- kBitMapIDominated,
- kBitMapDomFrontier,
- kBitMapPhi,
- kBitMapTmpBlocks,
- kBitMapInputBlocks,
- kBitMapRegisterV,
- kBitMapTempSSARegisterV,
- kBitMapNullCheck,
- kBitMapTmpBlockV,
- kBitMapPredecessors,
- kNumBitMapKinds
+ kBitMapMisc = 0,
+ kBitMapUse,
+ kBitMapDef,
+ kBitMapLiveIn,
+ kBitMapBMatrix,
+ kBitMapDominators,
+ kBitMapIDominated,
+ kBitMapDomFrontier,
+ kBitMapPhi,
+ kBitMapTmpBlocks,
+ kBitMapInputBlocks,
+ kBitMapRegisterV,
+ kBitMapTempSSARegisterV,
+ kBitMapNullCheck,
+ kBitMapTmpBlockV,
+ kBitMapPredecessors,
+ kNumBitMapKinds
};
/* Force code generation paths for testing */
enum debugControlVector {
- kDebugDisplayMissingTargets,
- kDebugVerbose,
- kDebugDumpCFG,
- kDebugSlowFieldPath,
- kDebugSlowInvokePath,
- kDebugSlowStringPath,
- kDebugSlowTypePath,
- kDebugSlowestFieldPath,
- kDebugSlowestStringPath,
- kDebugExerciseResolveMethod,
- kDebugVerifyDataflow,
- kDebugShowMemoryUsage,
- kDebugShowNops,
- kDebugCountOpcodes,
+ kDebugDisplayMissingTargets,
+ kDebugVerbose,
+ kDebugDumpCFG,
+ kDebugSlowFieldPath,
+ kDebugSlowInvokePath,
+ kDebugSlowStringPath,
+ kDebugSlowTypePath,
+ kDebugSlowestFieldPath,
+ kDebugSlowestStringPath,
+ kDebugExerciseResolveMethod,
+ kDebugVerifyDataflow,
+ kDebugShowMemoryUsage,
+ kDebugShowNops,
+ kDebugCountOpcodes,
};
enum OatMethodAttributes {
- kIsCallee = 0, /* Code is part of a callee (invoked by a hot trace) */
- kIsHot, /* Code is part of a hot trace */
- kIsLeaf, /* Method is leaf */
- kIsEmpty, /* Method is empty */
- kIsThrowFree, /* Method doesn't throw */
- kIsGetter, /* Method fits the getter pattern */
- kIsSetter, /* Method fits the setter pattern */
- kCannotCompile, /* Method cannot be compiled */
+ kIsCallee = 0, /* Code is part of a callee (invoked by a hot trace) */
+ kIsHot, /* Code is part of a hot trace */
+ kIsLeaf, /* Method is leaf */
+ kIsEmpty, /* Method is empty */
+ kIsThrowFree, /* Method doesn't throw */
+ kIsGetter, /* Method fits the getter pattern */
+ kIsSetter, /* Method fits the setter pattern */
+ kCannotCompile, /* Method cannot be compiled */
};
#define METHOD_IS_CALLEE (1 << kIsCallee)
@@ -151,12 +151,12 @@
/* Customized node traversal orders for different needs */
enum DataFlowAnalysisMode {
- kAllNodes = 0, // All nodes
- kReachableNodes, // All reachable nodes
- kPreOrderDFSTraversal, // Depth-First-Search / Pre-Order
- kPostOrderDFSTraversal, // Depth-First-Search / Post-Order
- kPostOrderDOMTraversal, // Dominator tree / Post-Order
- kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order
+ kAllNodes = 0, // All nodes
+ kReachableNodes, // All reachable nodes
+ kPreOrderDFSTraversal, // Depth-First-Search / Pre-Order
+ kPostOrderDFSTraversal, // Depth-First-Search / Post-Order
+ kPostOrderDOMTraversal, // Dominator tree / Post-Order
+ kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order
};
struct CompilationUnit;
@@ -184,9 +184,9 @@
char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir);
char* oatGetSSAString(CompilationUnit* cUnit, SSARepresentation* ssaRep);
void oatDataFlowAnalysisDispatcher(CompilationUnit* cUnit,
- bool (*func)(CompilationUnit* , BasicBlock*),
- DataFlowAnalysisMode dfaMode,
- bool isIterative);
+ bool (*func)(CompilationUnit* , BasicBlock*),
+ DataFlowAnalysisMode dfaMode,
+ bool isIterative);
void oatMethodSSATransformation(CompilationUnit* cUnit);
u8 oatGetRegResourceMask(int reg);
void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix);
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 67d741a..2e0f6d5 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -31,49 +31,49 @@
#define SLOW_STRING_PATH (cUnit->enableDebug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cUnit->enableDebug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_FIELD_PATH (cUnit->enableDebug & \
- (1 << kDebugSlowestFieldPath))
+ (1 << kDebugSlowestFieldPath))
#define EXERCISE_SLOWEST_STRING_PATH (cUnit->enableDebug & \
- (1 << kDebugSlowestStringPath))
+ (1 << kDebugSlowestStringPath))
#define EXERCISE_RESOLVE_METHOD (cUnit->enableDebug & \
- (1 << kDebugExerciseResolveMethod))
+ (1 << kDebugExerciseResolveMethod))
enum RegisterClass {
- kCoreReg,
- kFPReg,
- kAnyReg,
+ kCoreReg,
+ kFPReg,
+ kAnyReg,
};
enum RegLocationType {
- kLocDalvikFrame = 0, // Normal Dalvik register
- kLocPhysReg,
- kLocCompilerTemp,
- kLocInvalid
+ kLocDalvikFrame = 0, // Normal Dalvik register
+ kLocPhysReg,
+ kLocCompilerTemp,
+ kLocInvalid
};
struct PromotionMap {
- RegLocationType coreLocation:3;
- u1 coreReg;
- RegLocationType fpLocation:3;
- u1 fpReg;
- bool firstInPair;
+ RegLocationType coreLocation:3;
+ u1 coreReg;
+ RegLocationType fpLocation:3;
+ u1 fpReg;
+ bool firstInPair;
};
struct RegLocation {
- RegLocationType location:3;
- unsigned wide:1;
- unsigned defined:1; // Do we know the type?
- unsigned fp:1; // Floating point?
- unsigned core:1; // Non-floating point?
- unsigned highWord:1; // High word of pair?
- unsigned home:1; // Does this represent the home location?
- u1 lowReg; // First physical register
- u1 highReg; // 2nd physical register (if wide)
- int32_t sRegLow; // SSA name for low Dalvik word
+ RegLocationType location:3;
+ unsigned wide:1;
+ unsigned defined:1; // Do we know the type?
+ unsigned fp:1; // Floating point?
+ unsigned core:1; // Non-floating point?
+ unsigned highWord:1; // High word of pair?
+ unsigned home:1; // Does this represent the home location?
+ u1 lowReg; // First physical register
+ u1 highReg; // 2nd physical register (if wide)
+ int32_t sRegLow; // SSA name for low Dalvik word
};
struct CompilerTemp {
- int sReg;
- ArenaBitVector* bv;
+ int sReg;
+ ArenaBitVector* bv;
};
/*
@@ -83,25 +83,25 @@
* possible.
*/
struct RegisterInfo {
- int reg; // Reg number
- bool inUse; // Has it been allocated?
- bool isTemp; // Can allocate as temp?
- bool pair; // Part of a register pair?
- int partner; // If pair, other reg of pair
- bool live; // Is there an associated SSA name?
- bool dirty; // If live, is it dirty?
- int sReg; // Name of live value
- LIR *defStart; // Starting inst in last def sequence
- LIR *defEnd; // Ending inst in last def sequence
+ int reg; // Reg number
+ bool inUse; // Has it been allocated?
+ bool isTemp; // Can allocate as temp?
+ bool pair; // Part of a register pair?
+ int partner; // If pair, other reg of pair
+ bool live; // Is there an associated SSA name?
+ bool dirty; // If live, is it dirty?
+ int sReg; // Name of live value
+ LIR *defStart; // Starting inst in last def sequence
+ LIR *defEnd; // Ending inst in last def sequence
};
struct RegisterPool {
- int numCoreRegs;
- RegisterInfo *coreRegs;
- int nextCoreReg;
- int numFPRegs;
- RegisterInfo *FPRegs;
- int nextFPReg;
+ int numCoreRegs;
+ RegisterInfo *coreRegs;
+ int nextCoreReg;
+ int numFPRegs;
+ RegisterInfo *FPRegs;
+ int nextFPReg;
};
#define INVALID_SREG (-1)
@@ -126,11 +126,11 @@
#define MANY_BLOCKS 4000 /* Non-initializer threshold */
enum BBType {
- kEntryBlock,
- kDalvikByteCode,
- kExitBlock,
- kExceptionHandling,
- kCatchEntry,
+ kEntryBlock,
+ kDalvikByteCode,
+ kExitBlock,
+ kExceptionHandling,
+ kCatchEntry,
};
/* Utility macros to traverse the LIR list */
@@ -141,54 +141,54 @@
#define PREV_LIR_LVALUE(lir) (lir)->prev
struct LIR {
- int offset; // Offset of this instruction
- int dalvikOffset; // Offset of Dalvik opcode
- LIR* next;
- LIR* prev;
- LIR* target;
- int opcode;
- int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2]
- struct {
- bool isNop:1; // LIR is optimized away
- bool pcRelFixup:1; // May need pc-relative fixup
- unsigned int age:4; // default is 0, set lazily by the optimizer
- unsigned int size:5; // in bytes
- unsigned int unused:21;
- } flags;
- int aliasInfo; // For Dalvik register & litpool disambiguation
- u8 useMask; // Resource mask for use
- u8 defMask; // Resource mask for def
+ int offset; // Offset of this instruction
+ int dalvikOffset; // Offset of Dalvik opcode
+ LIR* next;
+ LIR* prev;
+ LIR* target;
+ int opcode;
+ int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2]
+ struct {
+ bool isNop:1; // LIR is optimized away
+ bool pcRelFixup:1; // May need pc-relative fixup
+ unsigned int age:4; // default is 0, set lazily by the optimizer
+ unsigned int size:5; // in bytes
+ unsigned int unused:21;
+ } flags;
+ int aliasInfo; // For Dalvik register & litpool disambiguation
+ u8 useMask; // Resource mask for use
+ u8 defMask; // Resource mask for def
};
enum ExtendedMIROpcode {
- kMirOpFirst = kNumPackedOpcodes,
- kMirOpPhi = kMirOpFirst,
- kMirOpCopy,
- kMirOpFusedCmplFloat,
- kMirOpFusedCmpgFloat,
- kMirOpFusedCmplDouble,
- kMirOpFusedCmpgDouble,
- kMirOpFusedCmpLong,
- kMirOpNop,
- kMirOpNullNRangeUpCheck,
- kMirOpNullNRangeDownCheck,
- kMirOpLowerBound,
- kMirOpLast,
+ kMirOpFirst = kNumPackedOpcodes,
+ kMirOpPhi = kMirOpFirst,
+ kMirOpCopy,
+ kMirOpFusedCmplFloat,
+ kMirOpFusedCmpgFloat,
+ kMirOpFusedCmplDouble,
+ kMirOpFusedCmpgDouble,
+ kMirOpFusedCmpLong,
+ kMirOpNop,
+ kMirOpNullNRangeUpCheck,
+ kMirOpNullNRangeDownCheck,
+ kMirOpLowerBound,
+ kMirOpLast,
};
struct SSARepresentation;
enum MIROptimizationFlagPositons {
- kMIRIgnoreNullCheck = 0,
- kMIRNullCheckOnly,
- kMIRIgnoreRangeCheck,
- kMIRRangeCheckOnly,
- kMIRInlined, // Invoke is inlined (ie dead)
- kMIRInlinedPred, // Invoke is inlined via prediction
- kMIRCallee, // Instruction is inlined from callee
- kMIRIgnoreSuspendCheck,
- kMIRDup,
- kMIRMark, // Temporary node mark
+ kMIRIgnoreNullCheck = 0,
+ kMIRNullCheckOnly,
+ kMIRIgnoreRangeCheck,
+ kMIRRangeCheckOnly,
+ kMIRInlined, // Invoke is inlined (ie dead)
+ kMIRInlinedPred, // Invoke is inlined via prediction
+ kMIRCallee, // Instruction is inlined from callee
+ kMIRIgnoreSuspendCheck,
+ kMIRDup,
+ kMIRMark, // Temporary node mark
};
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
@@ -203,68 +203,68 @@
#define MIR_MARK (1 << kMIRMark)
struct CallsiteInfo {
- const char* classDescriptor;
- Object* classLoader;
- const Method* method;
- LIR* misPredBranchOver;
+ const char* classDescriptor;
+ Object* classLoader;
+ const Method* method;
+ LIR* misPredBranchOver;
};
struct MIR {
- DecodedInstruction dalvikInsn;
- unsigned int width;
- unsigned int offset;
- MIR* prev;
- MIR* next;
- SSARepresentation* ssaRep;
- int optimizationFlags;
- int seqNum;
- union {
- // Used by the inlined insn from the callee to find the mother method
- const Method* calleeMethod;
- // Used by the inlined invoke to find the class and method pointers
- CallsiteInfo* callsiteInfo;
- // Used to quickly locate all Phi opcodes
- MIR* phiNext;
- } meta;
+ DecodedInstruction dalvikInsn;
+ unsigned int width;
+ unsigned int offset;
+ MIR* prev;
+ MIR* next;
+ SSARepresentation* ssaRep;
+ int optimizationFlags;
+ int seqNum;
+ union {
+ // Used by the inlined insn from the callee to find the mother method
+ const Method* calleeMethod;
+ // Used by the inlined invoke to find the class and method pointers
+ CallsiteInfo* callsiteInfo;
+ // Used to quickly locate all Phi opcodes
+ MIR* phiNext;
+ } meta;
};
struct BasicBlockDataFlow;
/* For successorBlockList */
enum BlockListType {
- kNotUsed = 0,
- kCatch,
- kPackedSwitch,
- kSparseSwitch,
+ kNotUsed = 0,
+ kCatch,
+ kPackedSwitch,
+ kSparseSwitch,
};
struct BasicBlock {
- int id;
- int dfsId;
- bool visited;
- bool hidden;
- bool catchEntry;
- bool fallThroughTarget; // Reached via fallthrough
- uint16_t startOffset;
- uint16_t nestingDepth;
- const Method* containingMethod; // For blocks from the callee
- BBType blockType;
- bool needFallThroughBranch; // For blocks ended due to length limit
- bool isFallThroughFromInvoke; // True means the block needs alignment
- MIR* firstMIRInsn;
- MIR* lastMIRInsn;
- BasicBlock* fallThrough;
- BasicBlock* taken;
- BasicBlock* iDom; // Immediate dominator
- BasicBlockDataFlow* dataFlowInfo;
- GrowableList* predecessors;
- ArenaBitVector* dominators;
- ArenaBitVector* iDominated; // Set nodes being immediately dominated
- ArenaBitVector* domFrontier; // Dominance frontier
- struct { // For one-to-many successors like
- BlockListType blockListType; // switch and exception handling
- GrowableList blocks;
- } successorBlockList;
+ int id;
+ int dfsId;
+ bool visited;
+ bool hidden;
+ bool catchEntry;
+ bool fallThroughTarget; // Reached via fallthrough
+ uint16_t startOffset;
+ uint16_t nestingDepth;
+ const Method* containingMethod; // For blocks from the callee
+ BBType blockType;
+ bool needFallThroughBranch; // For blocks ended due to length limit
+ bool isFallThroughFromInvoke; // True means the block needs alignment
+ MIR* firstMIRInsn;
+ MIR* lastMIRInsn;
+ BasicBlock* fallThrough;
+ BasicBlock* taken;
+ BasicBlock* iDom; // Immediate dominator
+ BasicBlockDataFlow* dataFlowInfo;
+ GrowableList* predecessors;
+ ArenaBitVector* dominators;
+ ArenaBitVector* iDominated; // Set nodes being immediately dominated
+ ArenaBitVector* domFrontier; // Dominance frontier
+ struct { // For one-to-many successors like
+ BlockListType blockListType; // switch and exception handling
+ GrowableList blocks;
+ } successorBlockList;
};
/*
@@ -274,8 +274,8 @@
* For swtich blocks, key is the case value.
*/
struct SuccessorBlockInfo {
- BasicBlock* block;
- int key;
+ BasicBlock* block;
+ int key;
};
struct LoopAnalysis;
@@ -284,412 +284,412 @@
struct Memstats;
enum AssemblerStatus {
- kSuccess,
- kRetryAll,
- kRetryHalve
+ kSuccess,
+ kRetryAll,
+ kRetryHalve
};
#define NOTVISITED (-1)
struct CompilationUnit {
CompilationUnit()
- : numBlocks(0),
- compiler(NULL),
- class_linker(NULL),
- dex_file(NULL),
- dex_cache(NULL),
- class_loader(NULL),
- method_idx(0),
- code_item(NULL),
- access_flags(0),
- shorty(NULL),
- firstLIRInsn(NULL),
- lastLIRInsn(NULL),
- literalList(NULL),
- methodLiteralList(NULL),
- codeLiteralList(NULL),
- classPointerList(NULL),
- numClassPointers(0),
- chainCellOffsetLIR(NULL),
- disableOpt(0),
- enableDebug(0),
- headerSize(0),
- dataOffset(0),
- totalSize(0),
- assemblerStatus(kSuccess),
- assemblerRetries(0),
- genDebugger(false),
- printMe(false),
- hasClassLiterals(false),
- hasLoop(false),
- hasInvoke(false),
- heapMemOp(false),
- qdMode(false),
- usesLinkRegister(false),
- methodTraceSupport(false),
- regPool(NULL),
- optRound(0),
- instructionSet(kNone),
- numSSARegs(0),
- ssaBaseVRegs(NULL),
- ssaSubscripts(NULL),
- vRegToSSAMap(NULL),
- SSALastDefs(NULL),
- isConstantV(NULL),
- constantValues(NULL),
- phiAliasMap(NULL),
- phiList(NULL),
- regLocation(NULL),
- sequenceNumber(0),
- promotionMap(NULL),
- methodSReg(0),
- switchOverflowPad(NULL),
- numReachableBlocks(0),
- numDalvikRegisters(0),
- entryBlock(NULL),
- exitBlock(NULL),
- curBlock(NULL),
- nextCodegenBlock(NULL),
- iDomList(NULL),
- tryBlockAddr(NULL),
- defBlockMatrix(NULL),
- tempBlockV(NULL),
- tempDalvikRegisterV(NULL),
- tempSSARegisterV(NULL),
- printSSANames(false),
- blockLabelList(NULL),
- quitLoopMode(false),
- preservedRegsUsed(0),
- numIns(0),
- numOuts(0),
- numRegs(0),
- numCoreSpills(0),
- numFPSpills(0),
- numCompilerTemps(0),
- frameSize(0),
- coreSpillMask(0U),
- fpSpillMask(0U),
- attrs(0U),
- currentDalvikOffset(0),
- insns(NULL),
- insnsSize(0U),
- disableDataflow(false),
- defCount(0),
- compilerFlipMatch(false),
- arenaHead(NULL),
- currentArena(NULL),
- numArenaBlocks(0),
- mstats(NULL),
- opcodeCount(NULL) {
+ : numBlocks(0),
+ compiler(NULL),
+ class_linker(NULL),
+ dex_file(NULL),
+ dex_cache(NULL),
+ class_loader(NULL),
+ method_idx(0),
+ code_item(NULL),
+ access_flags(0),
+ shorty(NULL),
+ firstLIRInsn(NULL),
+ lastLIRInsn(NULL),
+ literalList(NULL),
+ methodLiteralList(NULL),
+ codeLiteralList(NULL),
+ classPointerList(NULL),
+ numClassPointers(0),
+ chainCellOffsetLIR(NULL),
+ disableOpt(0),
+ enableDebug(0),
+ headerSize(0),
+ dataOffset(0),
+ totalSize(0),
+ assemblerStatus(kSuccess),
+ assemblerRetries(0),
+ genDebugger(false),
+ printMe(false),
+ hasClassLiterals(false),
+ hasLoop(false),
+ hasInvoke(false),
+ heapMemOp(false),
+ qdMode(false),
+ usesLinkRegister(false),
+ methodTraceSupport(false),
+ regPool(NULL),
+ optRound(0),
+ instructionSet(kNone),
+ numSSARegs(0),
+ ssaBaseVRegs(NULL),
+ ssaSubscripts(NULL),
+ vRegToSSAMap(NULL),
+ SSALastDefs(NULL),
+ isConstantV(NULL),
+ constantValues(NULL),
+ phiAliasMap(NULL),
+ phiList(NULL),
+ regLocation(NULL),
+ sequenceNumber(0),
+ promotionMap(NULL),
+ methodSReg(0),
+ switchOverflowPad(NULL),
+ numReachableBlocks(0),
+ numDalvikRegisters(0),
+ entryBlock(NULL),
+ exitBlock(NULL),
+ curBlock(NULL),
+ nextCodegenBlock(NULL),
+ iDomList(NULL),
+ tryBlockAddr(NULL),
+ defBlockMatrix(NULL),
+ tempBlockV(NULL),
+ tempDalvikRegisterV(NULL),
+ tempSSARegisterV(NULL),
+ printSSANames(false),
+ blockLabelList(NULL),
+ quitLoopMode(false),
+ preservedRegsUsed(0),
+ numIns(0),
+ numOuts(0),
+ numRegs(0),
+ numCoreSpills(0),
+ numFPSpills(0),
+ numCompilerTemps(0),
+ frameSize(0),
+ coreSpillMask(0U),
+ fpSpillMask(0U),
+ attrs(0U),
+ currentDalvikOffset(0),
+ insns(NULL),
+ insnsSize(0U),
+ disableDataflow(false),
+ defCount(0),
+ compilerFlipMatch(false),
+ arenaHead(NULL),
+ currentArena(NULL),
+ numArenaBlocks(0),
+ mstats(NULL),
+ opcodeCount(NULL) {
#if !defined(NDEBUG)
liveSReg = 0;
#endif
- }
+ }
- int numBlocks;
- GrowableList blockList;
- Compiler* compiler; // Compiler driving this compiler
- ClassLinker* class_linker; // Linker to resolve fields and methods
- const DexFile* dex_file; // DexFile containing the method being compiled
- DexCache* dex_cache; // DexFile's corresponding cache
- const ClassLoader* class_loader; // compiling method's class loader
- uint32_t method_idx; // compiling method's index into method_ids of DexFile
- const DexFile::CodeItem* code_item; // compiling method's DexFile code_item
- uint32_t access_flags; // compiling method's access flags
- const char* shorty; // compiling method's shorty
- LIR* firstLIRInsn;
- LIR* lastLIRInsn;
- LIR* literalList; // Constants
- LIR* methodLiteralList; // Method literals requiring patching
- LIR* codeLiteralList; // Code literals requiring patching
- LIR* classPointerList; // Relocatable
- int numClassPointers;
- LIR* chainCellOffsetLIR;
- uint32_t disableOpt; // optControlVector flags
- uint32_t enableDebug; // debugControlVector flags
- int headerSize; // bytes before the first code ptr
- int dataOffset; // starting offset of literal pool
- int totalSize; // header + code size
- AssemblerStatus assemblerStatus; // Success or fix and retry
- int assemblerRetries;
- std::vector<uint8_t> codeBuffer;
- std::vector<uint32_t> mappingTable;
- std::vector<uint16_t> coreVmapTable;
- std::vector<uint16_t> fpVmapTable;
- bool genDebugger; // Generate code for debugger
- bool printMe;
- bool hasClassLiterals; // Contains class ptrs used as literals
- bool hasLoop; // Contains a loop
- bool hasInvoke; // Contains an invoke instruction
- bool heapMemOp; // Mark mem ops for self verification
- bool qdMode; // Compile for code size/compile time
- bool usesLinkRegister; // For self-verification only
- bool methodTraceSupport; // For TraceView profiling
- RegisterPool* regPool;
- int optRound; // round number to tell an LIR's age
- InstructionSet instructionSet;
- /* Number of total regs used in the whole cUnit after SSA transformation */
- int numSSARegs;
- /* Map SSA reg i to the base virtual register/subscript */
- GrowableList* ssaBaseVRegs;
- GrowableList* ssaSubscripts;
+ int numBlocks;
+ GrowableList blockList;
+ Compiler* compiler; // Compiler driving this compiler
+ ClassLinker* class_linker; // Linker to resolve fields and methods
+ const DexFile* dex_file; // DexFile containing the method being compiled
+ DexCache* dex_cache; // DexFile's corresponding cache
+ const ClassLoader* class_loader; // compiling method's class loader
+ uint32_t method_idx; // compiling method's index into method_ids of DexFile
+ const DexFile::CodeItem* code_item; // compiling method's DexFile code_item
+ uint32_t access_flags; // compiling method's access flags
+ const char* shorty; // compiling method's shorty
+ LIR* firstLIRInsn;
+ LIR* lastLIRInsn;
+ LIR* literalList; // Constants
+ LIR* methodLiteralList; // Method literals requiring patching
+ LIR* codeLiteralList; // Code literals requiring patching
+ LIR* classPointerList; // Relocatable
+ int numClassPointers;
+ LIR* chainCellOffsetLIR;
+ uint32_t disableOpt; // optControlVector flags
+ uint32_t enableDebug; // debugControlVector flags
+ int headerSize; // bytes before the first code ptr
+ int dataOffset; // starting offset of literal pool
+ int totalSize; // header + code size
+ AssemblerStatus assemblerStatus; // Success or fix and retry
+ int assemblerRetries;
+ std::vector<uint8_t> codeBuffer;
+ std::vector<uint32_t> mappingTable;
+ std::vector<uint16_t> coreVmapTable;
+ std::vector<uint16_t> fpVmapTable;
+ bool genDebugger; // Generate code for debugger
+ bool printMe;
+ bool hasClassLiterals; // Contains class ptrs used as literals
+ bool hasLoop; // Contains a loop
+ bool hasInvoke; // Contains an invoke instruction
+ bool heapMemOp; // Mark mem ops for self verification
+ bool qdMode; // Compile for code size/compile time
+ bool usesLinkRegister; // For self-verification only
+ bool methodTraceSupport; // For TraceView profiling
+ RegisterPool* regPool;
+ int optRound; // round number to tell an LIR's age
+ InstructionSet instructionSet;
+ /* Number of total regs used in the whole cUnit after SSA transformation */
+ int numSSARegs;
+ /* Map SSA reg i to the base virtual register/subscript */
+ GrowableList* ssaBaseVRegs;
+ GrowableList* ssaSubscripts;
- /* The following are new data structures to support SSA representations */
- /* Map original Dalvik virtual reg i to the current SSA name */
- int* vRegToSSAMap; // length == method->registersSize
- int* SSALastDefs; // length == method->registersSize
- ArenaBitVector* isConstantV; // length == numSSAReg
- int* constantValues; // length == numSSAReg
- int* phiAliasMap; // length == numSSAReg
- MIR* phiList;
+ /* The following are new data structures to support SSA representations */
+ /* Map original Dalvik virtual reg i to the current SSA name */
+ int* vRegToSSAMap; // length == method->registersSize
+ int* SSALastDefs; // length == method->registersSize
+ ArenaBitVector* isConstantV; // length == numSSAReg
+ int* constantValues; // length == numSSAReg
+ int* phiAliasMap; // length == numSSAReg
+ MIR* phiList;
- /* Use counts of ssa names */
- GrowableList useCounts; // Weighted by nesting depth
- GrowableList rawUseCounts; // Not weighted
+ /* Use counts of ssa names */
+ GrowableList useCounts; // Weighted by nesting depth
+ GrowableList rawUseCounts; // Not weighted
- /* Optimization support */
- GrowableList loopHeaders;
+ /* Optimization support */
+ GrowableList loopHeaders;
- /* Map SSA names to location */
- RegLocation* regLocation;
- int sequenceNumber;
+ /* Map SSA names to location */
+ RegLocation* regLocation;
+ int sequenceNumber;
- /* Keep track of Dalvik vReg to physical register mappings */
- PromotionMap* promotionMap;
+ /* Keep track of Dalvik vReg to physical register mappings */
+ PromotionMap* promotionMap;
- /* SSA name for Method* */
- int methodSReg;
+ /* SSA name for Method* */
+ int methodSReg;
- /*
- * Set to the Dalvik PC of the switch instruction if it has more than
- * MAX_CHAINED_SWITCH_CASES cases.
- */
- const u2* switchOverflowPad;
+ /*
+ * Set to the Dalvik PC of the switch instruction if it has more than
+ * MAX_CHAINED_SWITCH_CASES cases.
+ */
+ const u2* switchOverflowPad;
- int numReachableBlocks;
- int numDalvikRegisters; // method->registersSize
- BasicBlock* entryBlock;
- BasicBlock* exitBlock;
- BasicBlock* curBlock;
- BasicBlock* nextCodegenBlock; // for extended trace codegen
- GrowableList dfsOrder;
- GrowableList dfsPostOrder;
- GrowableList domPostOrderTraversal;
- GrowableList throwLaunchpads;
- GrowableList suspendLaunchpads;
- GrowableList intrinsicLaunchpads;
- GrowableList compilerTemps;
- int* iDomList;
- ArenaBitVector* tryBlockAddr;
- ArenaBitVector** defBlockMatrix; // numDalvikRegister x numBlocks
- ArenaBitVector* tempBlockV;
- ArenaBitVector* tempDalvikRegisterV;
- ArenaBitVector* tempSSARegisterV; // numSSARegs
- bool printSSANames;
- void* blockLabelList;
- bool quitLoopMode; // cold path/complex bytecode
- int preservedRegsUsed; // How many callee save regs used
- /*
- * Frame layout details.
- * NOTE: for debug support it will be necessary to add a structure
- * to map the Dalvik virtual registers to the promoted registers.
- * NOTE: "num" fields are in 4-byte words, "Size" and "Offset" in bytes.
- */
- int numIns;
- int numOuts;
- int numRegs; // Unlike numDalvikRegisters, does not include ins
- int numCoreSpills;
- int numFPSpills;
- int numCompilerTemps;
- int frameSize;
- unsigned int coreSpillMask;
- unsigned int fpSpillMask;
- unsigned int attrs;
- /*
- * CLEANUP/RESTRUCTURE: The code generation utilities don't have a built-in
- * mechanism to propagate the original Dalvik opcode address to the
- * associated generated instructions. For the trace compiler, this wasn't
- * necessary because the interpreter handled all throws and debugging
- * requests. For now we'll handle this by placing the Dalvik offset
- * in the CompilationUnit struct before codegen for each instruction.
- * The low-level LIR creation utilites will pull it from here. Should
- * be rewritten.
- */
- int currentDalvikOffset;
- GrowableList switchTables;
- GrowableList fillArrayData;
- const u2* insns;
- u4 insnsSize;
- bool disableDataflow; // Skip dataflow analysis if possible
- SafeMap<unsigned int, BasicBlock*> blockMap; // findBlock lookup cache
- SafeMap<unsigned int, LIR*> boundaryMap; // boundary lookup cache
- int defCount; // Used to estimate number of SSA names
+ int numReachableBlocks;
+ int numDalvikRegisters; // method->registersSize
+ BasicBlock* entryBlock;
+ BasicBlock* exitBlock;
+ BasicBlock* curBlock;
+ BasicBlock* nextCodegenBlock; // for extended trace codegen
+ GrowableList dfsOrder;
+ GrowableList dfsPostOrder;
+ GrowableList domPostOrderTraversal;
+ GrowableList throwLaunchpads;
+ GrowableList suspendLaunchpads;
+ GrowableList intrinsicLaunchpads;
+ GrowableList compilerTemps;
+ int* iDomList;
+ ArenaBitVector* tryBlockAddr;
+ ArenaBitVector** defBlockMatrix; // numDalvikRegister x numBlocks
+ ArenaBitVector* tempBlockV;
+ ArenaBitVector* tempDalvikRegisterV;
+ ArenaBitVector* tempSSARegisterV; // numSSARegs
+ bool printSSANames;
+ void* blockLabelList;
+ bool quitLoopMode; // cold path/complex bytecode
+ int preservedRegsUsed; // How many callee save regs used
+ /*
+ * Frame layout details.
+ * NOTE: for debug support it will be necessary to add a structure
+ * to map the Dalvik virtual registers to the promoted registers.
+ * NOTE: "num" fields are in 4-byte words, "Size" and "Offset" in bytes.
+ */
+ int numIns;
+ int numOuts;
+ int numRegs; // Unlike numDalvikRegisters, does not include ins
+ int numCoreSpills;
+ int numFPSpills;
+ int numCompilerTemps;
+ int frameSize;
+ unsigned int coreSpillMask;
+ unsigned int fpSpillMask;
+ unsigned int attrs;
+ /*
+ * CLEANUP/RESTRUCTURE: The code generation utilities don't have a built-in
+ * mechanism to propagate the original Dalvik opcode address to the
+ * associated generated instructions. For the trace compiler, this wasn't
+ * necessary because the interpreter handled all throws and debugging
+ * requests. For now we'll handle this by placing the Dalvik offset
+ * in the CompilationUnit struct before codegen for each instruction.
+ * The low-level LIR creation utilites will pull it from here. Should
+ * be rewritten.
+ */
+ int currentDalvikOffset;
+ GrowableList switchTables;
+ GrowableList fillArrayData;
+ const u2* insns;
+ u4 insnsSize;
+ bool disableDataflow; // Skip dataflow analysis if possible
+ SafeMap<unsigned int, BasicBlock*> blockMap; // findBlock lookup cache
+ SafeMap<unsigned int, LIR*> boundaryMap; // boundary lookup cache
+ int defCount; // Used to estimate number of SSA names
- // If non-empty, apply optimizer/debug flags only to matching methods.
- std::string compilerMethodMatch;
- // Flips sense of compilerMethodMatch - apply flags if doesn't match.
- bool compilerFlipMatch;
- ArenaMemBlock* arenaHead;
- ArenaMemBlock* currentArena;
- int numArenaBlocks;
- Memstats* mstats;
- int* opcodeCount; // Count Dalvik opcodes for tuning
+ // If non-empty, apply optimizer/debug flags only to matching methods.
+ std::string compilerMethodMatch;
+ // Flips sense of compilerMethodMatch - apply flags if doesn't match.
+ bool compilerFlipMatch;
+ ArenaMemBlock* arenaHead;
+ ArenaMemBlock* currentArena;
+ int numArenaBlocks;
+ Memstats* mstats;
+ int* opcodeCount; // Count Dalvik opcodes for tuning
#ifndef NDEBUG
- /*
- * Sanity checking for the register temp tracking. The same ssa
- * name should never be associated with one temp register per
- * instruction compilation.
- */
- int liveSReg;
+ /*
+ * Sanity checking for the register temp tracking. The same ssa
+ * name should never be associated with one temp register per
+ * instruction compilation.
+ */
+ int liveSReg;
#endif
};
enum OpSize {
- kWord,
- kLong,
- kSingle,
- kDouble,
- kUnsignedHalf,
- kSignedHalf,
- kUnsignedByte,
- kSignedByte,
+ kWord,
+ kLong,
+ kSingle,
+ kDouble,
+ kUnsignedHalf,
+ kSignedHalf,
+ kUnsignedByte,
+ kSignedByte,
};
enum OpKind {
- kOpMov,
- kOpMvn,
- kOpCmp,
- kOpLsl,
- kOpLsr,
- kOpAsr,
- kOpRor,
- kOpNot,
- kOpAnd,
- kOpOr,
- kOpXor,
- kOpNeg,
- kOpAdd,
- kOpAdc,
- kOpSub,
- kOpSbc,
- kOpRsub,
- kOpMul,
- kOpDiv,
- kOpRem,
- kOpBic,
- kOpCmn,
- kOpTst,
- kOpBkpt,
- kOpBlx,
- kOpPush,
- kOpPop,
- kOp2Char,
- kOp2Short,
- kOp2Byte,
- kOpCondBr,
- kOpUncondBr,
- kOpBx,
- kOpInvalid,
+ kOpMov,
+ kOpMvn,
+ kOpCmp,
+ kOpLsl,
+ kOpLsr,
+ kOpAsr,
+ kOpRor,
+ kOpNot,
+ kOpAnd,
+ kOpOr,
+ kOpXor,
+ kOpNeg,
+ kOpAdd,
+ kOpAdc,
+ kOpSub,
+ kOpSbc,
+ kOpRsub,
+ kOpMul,
+ kOpDiv,
+ kOpRem,
+ kOpBic,
+ kOpCmn,
+ kOpTst,
+ kOpBkpt,
+ kOpBlx,
+ kOpPush,
+ kOpPop,
+ kOp2Char,
+ kOp2Short,
+ kOp2Byte,
+ kOpCondBr,
+ kOpUncondBr,
+ kOpBx,
+ kOpInvalid,
};
std::ostream& operator<<(std::ostream& os, const OpKind& kind);
enum ConditionCode {
- kCondEq, // equal
- kCondNe, // not equal
- kCondCs, // carry set (unsigned less than)
- kCondUlt = kCondCs,
- kCondCc, // carry clear (unsigned greater than or same)
- kCondUge = kCondCc,
- kCondMi, // minus
- kCondPl, // plus, positive or zero
- kCondVs, // overflow
- kCondVc, // no overflow
- kCondHi, // unsigned greater than
- kCondLs, // unsigned lower or same
- kCondGe, // signed greater than or equal
- kCondLt, // signed less than
- kCondGt, // signed greater than
- kCondLe, // signed less than or equal
- kCondAl, // always
- kCondNv, // never
+ kCondEq, // equal
+ kCondNe, // not equal
+ kCondCs, // carry set (unsigned less than)
+ kCondUlt = kCondCs,
+ kCondCc, // carry clear (unsigned greater than or same)
+ kCondUge = kCondCc,
+ kCondMi, // minus
+ kCondPl, // plus, positive or zero
+ kCondVs, // overflow
+ kCondVc, // no overflow
+ kCondHi, // unsigned greater than
+ kCondLs, // unsigned lower or same
+ kCondGe, // signed greater than or equal
+ kCondLt, // signed less than
+ kCondGt, // signed greater than
+ kCondLe, // signed less than or equal
+ kCondAl, // always
+ kCondNv, // never
};
enum ThrowKind {
- kThrowNullPointer,
- kThrowDivZero,
- kThrowArrayBounds,
- kThrowVerificationError,
- kThrowNoSuchMethod,
- kThrowStackOverflow,
+ kThrowNullPointer,
+ kThrowDivZero,
+ kThrowArrayBounds,
+ kThrowVerificationError,
+ kThrowNoSuchMethod,
+ kThrowStackOverflow,
};
struct SwitchTable {
- int offset;
- const u2* table; // Original dex table
- int vaddr; // Dalvik offset of switch opcode
- LIR* anchor; // Reference instruction for relative offsets
- LIR** targets; // Array of case targets
+ int offset;
+ const u2* table; // Original dex table
+ int vaddr; // Dalvik offset of switch opcode
+ LIR* anchor; // Reference instruction for relative offsets
+ LIR** targets; // Array of case targets
};
struct FillArrayData {
- int offset;
- const u2* table; // Original dex table
- int size;
- int vaddr; // Dalvik offset of FILL_ARRAY_DATA opcode
+ int offset;
+ const u2* table; // Original dex table
+ int size;
+ int vaddr; // Dalvik offset of FILL_ARRAY_DATA opcode
};
#define MAX_PATTERN_LEN 5
enum SpecialCaseHandler {
- kNoHandler,
- kNullMethod,
- kConstFunction,
- kIGet,
- kIGetBoolean,
- kIGetObject,
- kIGetByte,
- kIGetChar,
- kIGetShort,
- kIGetWide,
- kIPut,
- kIPutBoolean,
- kIPutObject,
- kIPutByte,
- kIPutChar,
- kIPutShort,
- kIPutWide,
- kIdentity,
+ kNoHandler,
+ kNullMethod,
+ kConstFunction,
+ kIGet,
+ kIGetBoolean,
+ kIGetObject,
+ kIGetByte,
+ kIGetChar,
+ kIGetShort,
+ kIGetWide,
+ kIPut,
+ kIPutBoolean,
+ kIPutObject,
+ kIPutByte,
+ kIPutChar,
+ kIPutShort,
+ kIPutWide,
+ kIdentity,
};
struct CodePattern {
- const Instruction::Code opcodes[MAX_PATTERN_LEN];
- const SpecialCaseHandler handlerCode;
+ const Instruction::Code opcodes[MAX_PATTERN_LEN];
+ const SpecialCaseHandler handlerCode;
};
static const CodePattern specialPatterns[] = {
- {{Instruction::RETURN_VOID}, kNullMethod},
- {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
- {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
- {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
- {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
- {{Instruction::IGET, Instruction:: RETURN}, kIGet},
- {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
- {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
- {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
- {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
- {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
- {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
- {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
- {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
- {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
- {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
- {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
- {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
- {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
- {{Instruction::RETURN}, kIdentity},
- {{Instruction::RETURN_OBJECT}, kIdentity},
- {{Instruction::RETURN_WIDE}, kIdentity},
+ {{Instruction::RETURN_VOID}, kNullMethod},
+ {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
+ {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
+ {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
+ {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
+ {{Instruction::IGET, Instruction::RETURN}, kIGet},
+ {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
+ {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
+ {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
+ {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
+ {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
+ {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
+ {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
+ {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
+ {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
+ {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
+ {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
+ {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
+ {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
+ {{Instruction::RETURN}, kIdentity},
+ {{Instruction::RETURN_OBJECT}, kIdentity},
+ {{Instruction::RETURN_WIDE}, kIdentity},
};
BasicBlock* oatNewBB(CompilationUnit* cUnit, BBType blockType, int blockId);
diff --git a/src/compiler/CompilerUtility.h b/src/compiler/CompilerUtility.h
index 4a15f26..d431678 100644
--- a/src/compiler/CompilerUtility.h
+++ b/src/compiler/CompilerUtility.h
@@ -31,10 +31,10 @@
//#define WITH_MEMSTATS
struct ArenaMemBlock {
- size_t blockSize;
- size_t bytesAllocated;
- ArenaMemBlock *next;
- char ptr[0];
+ size_t blockSize;
+ size_t bytesAllocated;
+ ArenaMemBlock *next;
+ char ptr[0];
};
void* oatNew(CompilationUnit* cUnit, size_t size, bool zero,
@@ -55,9 +55,9 @@
};
struct GrowableListIterator {
- GrowableList* list;
- size_t idx;
- size_t size;
+ GrowableList* list;
+ size_t idx;
+ size_t size;
};
/*
@@ -67,19 +67,19 @@
* All operations on a BitVector are unsynchronized.
*/
struct ArenaBitVector {
- bool expandable; /* expand bitmap if we run out? */
- u4 storageSize; /* current size, in 32-bit words */
- u4* storage;
+ bool expandable; /* expand bitmap if we run out? */
+ u4 storageSize; /* current size, in 32-bit words */
+ u4* storage;
#ifdef WITH_MEMSTATS
- oatBitMapKind kind; /* for memory use tuning */
+ oatBitMapKind kind; /* for memory use tuning */
#endif
};
/* Handy iterator to walk through the bit positions set to 1 */
struct ArenaBitVectorIterator {
- ArenaBitVector* pBits;
- u4 idx;
- u4 bitSize;
+ ArenaBitVector* pBits;
+ u4 idx;
+ u4 bitSize;
};
#define GET_ELEM_N(LIST, TYPE, N) (((TYPE*) LIST->elemList)[N])
diff --git a/src/compiler/Dataflow.cc b/src/compiler/Dataflow.cc
index bea5ef0..2ba0cc1 100644
--- a/src/compiler/Dataflow.cc
+++ b/src/compiler/Dataflow.cc
@@ -29,919 +29,916 @@
* scope of optimizations but will not cause mis-optimizations.
*/
const int oatDataFlowAttributes[kMirOpLast] = {
- // 00 NOP
- DF_NOP,
+ // 00 NOP
+ DF_NOP,
- // 01 MOVE vA, vB
- DF_DA | DF_UB | DF_IS_MOVE,
+ // 01 MOVE vA, vB
+ DF_DA | DF_UB | DF_IS_MOVE,
- // 02 MOVE_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
+ // 02 MOVE_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
- // 03 MOVE_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
+ // 03 MOVE_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
- // 04 MOVE_WIDE vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
+ // 04 MOVE_WIDE vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
- // 05 MOVE_WIDE_FROM16 vAA, vBBBB
- DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
+ // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+ DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
- // 06 MOVE_WIDE_16 vAAAA, vBBBB
- DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
+ // 06 MOVE_WIDE_16 vAAAA, vBBBB
+ DF_DA_WIDE | DF_UB_WIDE | DF_IS_MOVE,
- // 07 MOVE_OBJECT vA, vB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
+ // 07 MOVE_OBJECT vA, vB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
- // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
+ // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
- // 09 MOVE_OBJECT_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
+ // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_CORE_A | DF_CORE_B,
- // 0A MOVE_RESULT vAA
- DF_DA,
+ // 0A MOVE_RESULT vAA
+ DF_DA,
- // 0B MOVE_RESULT_WIDE vAA
- DF_DA_WIDE,
+ // 0B MOVE_RESULT_WIDE vAA
+ DF_DA_WIDE,
- // 0C MOVE_RESULT_OBJECT vAA
- DF_DA | DF_CORE_A,
+ // 0C MOVE_RESULT_OBJECT vAA
+ DF_DA | DF_CORE_A,
- // 0D MOVE_EXCEPTION vAA
- DF_DA | DF_CORE_A,
+ // 0D MOVE_EXCEPTION vAA
+ DF_DA | DF_CORE_A,
- // 0E RETURN_VOID
- DF_NOP,
+ // 0E RETURN_VOID
+ DF_NOP,
- // 0F RETURN vAA
- DF_UA,
+ // 0F RETURN vAA
+ DF_UA,
- // 10 RETURN_WIDE vAA
- DF_UA_WIDE,
+ // 10 RETURN_WIDE vAA
+ DF_UA_WIDE,
- // 11 RETURN_OBJECT vAA
- DF_UA | DF_CORE_A,
+ // 11 RETURN_OBJECT vAA
+ DF_UA | DF_CORE_A,
- // 12 CONST_4 vA, #+B
- DF_DA | DF_SETS_CONST,
+ // 12 CONST_4 vA, #+B
+ DF_DA | DF_SETS_CONST,
- // 13 CONST_16 vAA, #+BBBB
- DF_DA | DF_SETS_CONST,
+ // 13 CONST_16 vAA, #+BBBB
+ DF_DA | DF_SETS_CONST,
- // 14 CONST vAA, #+BBBBBBBB
- DF_DA | DF_SETS_CONST,
+ // 14 CONST vAA, #+BBBBBBBB
+ DF_DA | DF_SETS_CONST,
- // 15 CONST_HIGH16 VAA, #+BBBB0000
- DF_DA | DF_SETS_CONST,
+ // 15 CONST_HIGH16 VAA, #+BBBB0000
+ DF_DA | DF_SETS_CONST,
- // 16 CONST_WIDE_16 vAA, #+BBBB
- DF_DA_WIDE | DF_SETS_CONST,
+ // 16 CONST_WIDE_16 vAA, #+BBBB
+ DF_DA_WIDE | DF_SETS_CONST,
- // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
- DF_DA_WIDE | DF_SETS_CONST,
+ // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+ DF_DA_WIDE | DF_SETS_CONST,
- // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
- DF_DA_WIDE | DF_SETS_CONST,
+ // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+ DF_DA_WIDE | DF_SETS_CONST,
- // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
- DF_DA_WIDE | DF_SETS_CONST,
+ // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+ DF_DA_WIDE | DF_SETS_CONST,
- // 1A CONST_STRING vAA, string@BBBB
- DF_DA | DF_CORE_A,
+ // 1A CONST_STRING vAA, string@BBBB
+ DF_DA | DF_CORE_A,
- // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
- DF_DA | DF_CORE_A,
+ // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+ DF_DA | DF_CORE_A,
- // 1C CONST_CLASS vAA, type@BBBB
- DF_DA | DF_CORE_A,
+ // 1C CONST_CLASS vAA, type@BBBB
+ DF_DA | DF_CORE_A,
- // 1D MONITOR_ENTER vAA
- DF_UA | DF_NULL_CHK_0 | DF_CORE_A,
+ // 1D MONITOR_ENTER vAA
+ DF_UA | DF_NULL_CHK_0 | DF_CORE_A,
- // 1E MONITOR_EXIT vAA
- DF_UA | DF_NULL_CHK_0 | DF_CORE_A,
+ // 1E MONITOR_EXIT vAA
+ DF_UA | DF_NULL_CHK_0 | DF_CORE_A,
- // 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_CORE_A | DF_UMS,
+ // 1F CHK_CAST vAA, type@BBBB
+ DF_UA | DF_CORE_A | DF_UMS,
- // 20 INSTANCE_OF vA, vB, type@CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B | DF_UMS,
+ // 20 INSTANCE_OF vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B | DF_UMS,
- // 21 ARRAY_LENGTH vA, vB
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_CORE_B,
+ // 21 ARRAY_LENGTH vA, vB
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_CORE_B,
- // 22 NEW_INSTANCE vAA, type@BBBB
- DF_DA | DF_NON_NULL_DST | DF_CORE_A | DF_UMS,
+ // 22 NEW_INSTANCE vAA, type@BBBB
+ DF_DA | DF_NON_NULL_DST | DF_CORE_A | DF_UMS,
- // 23 NEW_ARRAY vA, vB, type@CCCC
- DF_DA | DF_UB | DF_NON_NULL_DST | DF_CORE_A | DF_CORE_B | DF_UMS,
+ // 23 NEW_ARRAY vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_NON_NULL_DST | DF_CORE_A | DF_CORE_B | DF_UMS,
- // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+ // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
- // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
- DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+ // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+ DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
- // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
- DF_UA | DF_CORE_A | DF_UMS,
+ // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+ DF_UA | DF_CORE_A | DF_UMS,
- // 27 THROW vAA
- DF_UA | DF_CORE_A | DF_UMS,
+ // 27 THROW vAA
+ DF_UA | DF_CORE_A | DF_UMS,
- // 28 GOTO
- DF_NOP,
+ // 28 GOTO
+ DF_NOP,
- // 29 GOTO_16
- DF_NOP,
+ // 29 GOTO_16
+ DF_NOP,
- // 2A GOTO_32
- DF_NOP,
+ // 2A GOTO_32
+ DF_NOP,
- // 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ // 2B PACKED_SWITCH vAA, +BBBBBBBB
+ DF_UA,
- // 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+ DF_UA,
- // 2D CMPL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+ // 2D CMPL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
- // 2E CMPG_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+ // 2E CMPG_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
- // 2F CMPL_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+ // 2F CMPL_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
- // 30 CMPG_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+ // 30 CMPG_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
- // 31 CMP_LONG vAA, vBB, vCC
- DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 31 CMP_LONG vAA, vBB, vCC
+ DF_DA | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 32 IF_EQ vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 33 IF_NE vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 34 IF_LT vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 35 IF_GE vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 36 IF_GT vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 37 IF_LE vA, vB, +CCCC
+ DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 38 IF_EQZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 38 IF_EQZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 39 IF_NEZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 39 IF_NEZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 3A IF_LTZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 3A IF_LTZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 3B IF_GEZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 3B IF_GEZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 3C IF_GTZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 3C IF_GTZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 3D IF_LEZ vAA, +BBBB
- DF_UA | DF_CORE_A,
+ // 3D IF_LEZ vAA, +BBBB
+ DF_UA | DF_CORE_A,
- // 3E UNUSED_3E
- DF_NOP,
+ // 3E UNUSED_3E
+ DF_NOP,
- // 3F UNUSED_3F
- DF_NOP,
+ // 3F UNUSED_3F
+ DF_NOP,
- // 40 UNUSED_40
- DF_NOP,
+ // 40 UNUSED_40
+ DF_NOP,
- // 41 UNUSED_41
- DF_NOP,
+ // 41 UNUSED_41
+ DF_NOP,
- // 42 UNUSED_42
- DF_NOP,
+ // 42 UNUSED_42
+ DF_NOP,
- // 43 UNUSED_43
- DF_NOP,
+ // 43 UNUSED_43
+ DF_NOP,
- // 44 AGET vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 44 AGET vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 45 AGET_WIDE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 45 AGET_WIDE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 46 AGET_OBJECT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 46 AGET_OBJECT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 47 AGET_BOOLEAN vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 47 AGET_BOOLEAN vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 48 AGET_BYTE vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 48 AGET_BYTE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 49 AGET_CHAR vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 49 AGET_CHAR vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 4A AGET_SHORT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
+ // 4A AGET_SHORT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_CORE_B | DF_CORE_C,
- // 4B APUT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 4B APUT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 4C APUT_WIDE vAA, vBB, vCC
- DF_UA_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_CORE_B | DF_CORE_C,
+ // 4C APUT_WIDE vAA, vBB, vCC
+ DF_UA_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_CORE_B | DF_CORE_C,
- // 4D APUT_OBJECT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 4D APUT_OBJECT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 4E APUT_BOOLEAN vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 4E APUT_BOOLEAN vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 4F APUT_BYTE vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 4F APUT_BYTE vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 50 APUT_CHAR vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 50 APUT_CHAR vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 51 APUT_SHORT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
+ // 51 APUT_SHORT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_CORE_B | DF_CORE_C,
- // 52 IGET vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 52 IGET vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 53 IGET_WIDE vA, vB, field@CCCC
- DF_DA_WIDE | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 53 IGET_WIDE vA, vB, field@CCCC
+ DF_DA_WIDE | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 54 IGET_OBJECT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 54 IGET_OBJECT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 55 IGET_BOOLEAN vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 55 IGET_BOOLEAN vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 56 IGET_BYTE vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 56 IGET_BYTE vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 57 IGET_CHAR vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 57 IGET_CHAR vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 58 IGET_SHORT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // 58 IGET_SHORT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // 59 IPUT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 59 IPUT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 5A IPUT_WIDE vA, vB, field@CCCC
- DF_UA_WIDE | DF_UB | DF_NULL_CHK_2 | DF_CORE_B,
+ // 5A IPUT_WIDE vA, vB, field@CCCC
+ DF_UA_WIDE | DF_UB | DF_NULL_CHK_2 | DF_CORE_B,
- // 5B IPUT_OBJECT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 5B IPUT_OBJECT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 5C IPUT_BOOLEAN vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 5D IPUT_BYTE vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 5D IPUT_BYTE vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 5E IPUT_CHAR vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 5E IPUT_CHAR vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 5F IPUT_SHORT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // 5F IPUT_SHORT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // 60 SGET vAA, field@BBBB
- DF_DA | DF_UMS,
+ // 60 SGET vAA, field@BBBB
+ DF_DA | DF_UMS,
- // 61 SGET_WIDE vAA, field@BBBB
- DF_DA_WIDE | DF_UMS,
+ // 61 SGET_WIDE vAA, field@BBBB
+ DF_DA_WIDE | DF_UMS,
- // 62 SGET_OBJECT vAA, field@BBBB
- DF_DA | DF_CORE_A | DF_UMS,
+ // 62 SGET_OBJECT vAA, field@BBBB
+ DF_DA | DF_CORE_A | DF_UMS,
- // 63 SGET_BOOLEAN vAA, field@BBBB
- DF_DA | DF_UMS,
+ // 63 SGET_BOOLEAN vAA, field@BBBB
+ DF_DA | DF_UMS,
- // 64 SGET_BYTE vAA, field@BBBB
- DF_DA | DF_UMS,
+ // 64 SGET_BYTE vAA, field@BBBB
+ DF_DA | DF_UMS,
- // 65 SGET_CHAR vAA, field@BBBB
- DF_DA | DF_UMS,
+ // 65 SGET_CHAR vAA, field@BBBB
+ DF_DA | DF_UMS,
- // 66 SGET_SHORT vAA, field@BBBB
- DF_DA | DF_UMS,
+ // 66 SGET_SHORT vAA, field@BBBB
+ DF_DA | DF_UMS,
- // 67 SPUT vAA, field@BBBB
- DF_UA | DF_UMS,
+ // 67 SPUT vAA, field@BBBB
+ DF_UA | DF_UMS,
- // 68 SPUT_WIDE vAA, field@BBBB
- DF_UA_WIDE | DF_UMS,
+ // 68 SPUT_WIDE vAA, field@BBBB
+ DF_UA_WIDE | DF_UMS,
- // 69 SPUT_OBJECT vAA, field@BBBB
- DF_UA | DF_CORE_A | DF_UMS,
+ // 69 SPUT_OBJECT vAA, field@BBBB
+ DF_UA | DF_CORE_A | DF_UMS,
- // 6A SPUT_BOOLEAN vAA, field@BBBB
- DF_UA | DF_UMS,
+ // 6A SPUT_BOOLEAN vAA, field@BBBB
+ DF_UA | DF_UMS,
- // 6B SPUT_BYTE vAA, field@BBBB
- DF_UA | DF_UMS,
+ // 6B SPUT_BYTE vAA, field@BBBB
+ DF_UA | DF_UMS,
- // 6C SPUT_CHAR vAA, field@BBBB
- DF_UA | DF_UMS,
+ // 6C SPUT_CHAR vAA, field@BBBB
+ DF_UA | DF_UMS,
- // 6D SPUT_SHORT vAA, field@BBBB
- DF_UA | DF_UMS,
+ // 6D SPUT_SHORT vAA, field@BBBB
+ DF_UA | DF_UMS,
- // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_UMS,
+ // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_UMS,
- // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_UMS,
+ // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_UMS,
- // 73 UNUSED_73
- DF_NOP,
+ // 73 UNUSED_73
+ DF_NOP,
- // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_UMS,
+ // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_UMS,
- // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_UMS,
+ // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_UMS,
- // 79 UNUSED_79
- DF_NOP,
+ // 79 UNUSED_79
+ DF_NOP,
- // 7A UNUSED_7A
- DF_NOP,
+ // 7A UNUSED_7A
+ DF_NOP,
- // 7B NEG_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 7B NEG_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 7C NOT_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 7C NOT_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 7D NEG_LONG vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // 7D NEG_LONG vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // 7E NOT_LONG vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // 7E NOT_LONG vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // 7F NEG_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+ // 7F NEG_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_FP_B,
- // 80 NEG_DOUBLE vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // 80 NEG_DOUBLE vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // 81 INT_TO_LONG vA, vB
- DF_DA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 81 INT_TO_LONG vA, vB
+ DF_DA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
- // 82 INT_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+ // 82 INT_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
- // 83 INT_TO_DOUBLE vA, vB
- DF_DA_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+ // 83 INT_TO_DOUBLE vA, vB
+ DF_DA_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
- // 84 LONG_TO_INT vA, vB
- DF_DA | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // 84 LONG_TO_INT vA, vB
+ DF_DA | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // 85 LONG_TO_FLOAT vA, vB
- DF_DA | DF_UB_WIDE | DF_FP_A | DF_CORE_B,
+ // 85 LONG_TO_FLOAT vA, vB
+ DF_DA | DF_UB_WIDE | DF_FP_A | DF_CORE_B,
- // 86 LONG_TO_DOUBLE vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_FP_A | DF_CORE_B,
+ // 86 LONG_TO_DOUBLE vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_FP_A | DF_CORE_B,
- // 87 FLOAT_TO_INT vA, vB
- DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+ // 87 FLOAT_TO_INT vA, vB
+ DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
- // 88 FLOAT_TO_LONG vA, vB
- DF_DA_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+ // 88 FLOAT_TO_LONG vA, vB
+ DF_DA_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
- // 89 FLOAT_TO_DOUBLE vA, vB
- DF_DA_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+ // 89 FLOAT_TO_DOUBLE vA, vB
+ DF_DA_WIDE | DF_UB | DF_FP_A | DF_FP_B,
- // 8A DOUBLE_TO_INT vA, vB
- DF_DA | DF_UB_WIDE | DF_FP_B | DF_CORE_A,
+ // 8A DOUBLE_TO_INT vA, vB
+ DF_DA | DF_UB_WIDE | DF_FP_B | DF_CORE_A,
- // 8B DOUBLE_TO_LONG vA, vB
- DF_DA_WIDE | DF_UB_WIDE | DF_FP_B | DF_CORE_A,
+ // 8B DOUBLE_TO_LONG vA, vB
+ DF_DA_WIDE | DF_UB_WIDE | DF_FP_B | DF_CORE_A,
- // 8C DOUBLE_TO_FLOAT vA, vB
- DF_DA | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // 8C DOUBLE_TO_FLOAT vA, vB
+ DF_DA | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // 8D INT_TO_BYTE vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 8D INT_TO_BYTE vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 8E INT_TO_CHAR vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 8E INT_TO_CHAR vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 8F INT_TO_SHORT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // 8F INT_TO_SHORT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // 90 ADD_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 90 ADD_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 91 SUB_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 91 SUB_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 92 MUL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 92 MUL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 93 DIV_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 93 DIV_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 94 REM_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 94 REM_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 95 AND_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 95 AND_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 96 OR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 96 OR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 97 XOR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 97 XOR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 98 SHL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 98 SHL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 99 SHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 99 SHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9A USHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9A USHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9B ADD_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9B ADD_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9C SUB_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9C SUB_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9D MUL_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9D MUL_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9E DIV_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9E DIV_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // 9F REM_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // 9F REM_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A0 AND_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A0 AND_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A1 OR_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A1 OR_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A2 XOR_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A2 XOR_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A3 SHL_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A3 SHL_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A4 SHR_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A4 SHR_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A5 USHR_LONG vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+ // A5 USHR_LONG vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
- // A6 ADD_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+ // A6 ADD_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
- // A7 SUB_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+ // A7 SUB_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
- // A8 MUL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+ // A8 MUL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
- // A9 DIV_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+ // A9 DIV_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
- // AA REM_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AA REM_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
- // AB ADD_DOUBLE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AB ADD_DOUBLE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
- // AC SUB_DOUBLE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AC SUB_DOUBLE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
- // AD MUL_DOUBLE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AD MUL_DOUBLE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
- // AE DIV_DOUBLE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AE DIV_DOUBLE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
- // AF REM_DOUBLE vAA, vBB, vCC
- DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+ // AF REM_DOUBLE vAA, vBB, vCC
+ DF_DA_WIDE | DF_UB_WIDE | DF_UC_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
- // B0 ADD_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B0 ADD_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B1 SUB_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B1 SUB_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B2 MUL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B2 MUL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B3 DIV_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B3 DIV_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B4 REM_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B4 REM_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B5 AND_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B5 AND_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B6 OR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B6 OR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B7 XOR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B7 XOR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B8 SHL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B8 SHL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // B9 SHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // B9 SHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // BA USHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // BA USHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
- // BB ADD_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // BB ADD_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // BC SUB_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // BC SUB_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // BD MUL_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // BD MUL_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // BE DIV_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // BE DIV_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // BF REM_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // BF REM_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // C0 AND_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // C0 AND_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // C1 OR_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // C1 OR_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // C2 XOR_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // C2 XOR_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // C3 SHL_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+ // C3 SHL_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
- // C4 SHR_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+ // C4 SHR_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
- // C5 USHR_LONG_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+ // C5 USHR_LONG_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
- // C6 ADD_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // C6 ADD_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // C7 SUB_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // C7 SUB_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // C8 MUL_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // C8 MUL_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // C9 DIV_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // C9 DIV_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // CA REM_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // CA REM_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // CB ADD_DOUBLE_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // CB ADD_DOUBLE_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // CC SUB_DOUBLE_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // CC SUB_DOUBLE_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // CD MUL_DOUBLE_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // CD MUL_DOUBLE_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // CE DIV_DOUBLE_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // CE DIV_DOUBLE_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // CF REM_DOUBLE_2ADDR vA, vB
- DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // CF REM_DOUBLE_2ADDR vA, vB
+ DF_DA_WIDE | DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // D0 ADD_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D1 RSUB_INT vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D1 RSUB_INT vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D2 MUL_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D3 DIV_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D4 REM_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D4 REM_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D5 AND_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D5 AND_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D6 OR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D6 OR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D7 XOR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // D8 ADD_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B,
+ // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_IS_LINEAR | DF_CORE_A | DF_CORE_B,
- // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DA MUL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DA MUL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DB DIV_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DB DIV_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DC REM_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DC REM_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DD AND_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DD AND_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DE OR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DE OR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // DF XOR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // DF XOR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E0 SHL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E1 SHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E2 USHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+ // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E3 IGET_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // E3 IGET_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // E4 IPUT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
+ // E4 IPUT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_B,
- // E5 SGET_VOLATILE
- DF_DA | DF_UMS,
+ // E5 SGET_VOLATILE
+ DF_DA | DF_UMS,
- // E6 SPUT_VOLATILE
- DF_UA | DF_UMS,
+ // E6 SPUT_VOLATILE
+ DF_UA | DF_UMS,
- // E7 IGET_OBJECT_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_CORE_B,
+ // E7 IGET_OBJECT_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_CORE_B,
- // E8 IGET_WIDE_VOLATILE
- DF_DA_WIDE | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
+ // E8 IGET_WIDE_VOLATILE
+ DF_DA_WIDE | DF_UB | DF_NULL_CHK_0 | DF_CORE_B,
- // E9 IPUT_WIDE_VOLATILE
- DF_UA_WIDE | DF_UB | DF_NULL_CHK_2 | DF_CORE_B,
+ // E9 IPUT_WIDE_VOLATILE
+ DF_UA_WIDE | DF_UB | DF_NULL_CHK_2 | DF_CORE_B,
- // EA SGET_WIDE_VOLATILE
- DF_DA_WIDE | DF_UMS,
+ // EA SGET_WIDE_VOLATILE
+ DF_DA_WIDE | DF_UMS,
- // EB SPUT_WIDE_VOLATILE
- DF_UA_WIDE | DF_UMS,
+ // EB SPUT_WIDE_VOLATILE
+ DF_UA_WIDE | DF_UMS,
- // EC BREAKPOINT
- DF_NOP,
+ // EC BREAKPOINT
+ DF_NOP,
- // ED THROW_VERIFICATION_ERROR
- DF_NOP | DF_UMS,
+ // ED THROW_VERIFICATION_ERROR
+ DF_NOP | DF_UMS,
- // EE EXECUTE_INLINE
- DF_FORMAT_35C,
+ // EE EXECUTE_INLINE
+ DF_FORMAT_35C,
- // EF EXECUTE_INLINE_RANGE
- DF_FORMAT_3RC,
+ // EF EXECUTE_INLINE_RANGE
+ DF_FORMAT_3RC,
- // F0 INVOKE_OBJECT_INIT_RANGE
- DF_NOP | DF_NULL_CHK_0,
+ // F0 INVOKE_OBJECT_INIT_RANGE
+ DF_NOP | DF_NULL_CHK_0,
- // F1 RETURN_VOID_BARRIER
- DF_NOP,
+ // F1 RETURN_VOID_BARRIER
+ DF_NOP,
- // F2 IGET_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0,
+ // F2 IGET_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
- // F3 IGET_WIDE_QUICK
- DF_DA_WIDE | DF_UB | DF_NULL_CHK_0,
+ // F3 IGET_WIDE_QUICK
+ DF_DA_WIDE | DF_UB | DF_NULL_CHK_0,
- // F4 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0,
+ // F4 IGET_OBJECT_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
- // F5 IPUT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1,
+ // F5 IPUT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
- // F6 IPUT_WIDE_QUICK
- DF_UA_WIDE | DF_UB | DF_NULL_CHK_2,
+ // F6 IPUT_WIDE_QUICK
+ DF_UA_WIDE | DF_UB | DF_NULL_CHK_2,
- // F7 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1,
+ // F7 IPUT_OBJECT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
- // F8 INVOKE_VIRTUAL_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // F8 INVOKE_VIRTUAL_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // F9 INVOKE_VIRTUAL_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // F9 INVOKE_VIRTUAL_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // FA INVOKE_SUPER_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // FA INVOKE_SUPER_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // FB INVOKE_SUPER_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // FB INVOKE_SUPER_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // FC IPUT_OBJECT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_A | DF_CORE_B,
+ // FC IPUT_OBJECT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_CORE_A | DF_CORE_B,
- // FD SGET_OBJECT_VOLATILE
- DF_DA | DF_CORE_A | DF_UMS,
+ // FD SGET_OBJECT_VOLATILE
+ DF_DA | DF_CORE_A | DF_UMS,
- // FE SPUT_OBJECT_VOLATILE
- DF_UA | DF_CORE_A | DF_UMS,
+ // FE SPUT_OBJECT_VOLATILE
+ DF_UA | DF_CORE_A | DF_UMS,
- // FF UNUSED_FF
- DF_NOP,
+ // FF UNUSED_FF
+ DF_NOP,
- // Beginning of extended MIR opcodes
- // 100 MIR_PHI
- DF_PHI | DF_DA | DF_NULL_TRANSFER_N,
+ // Beginning of extended MIR opcodes
+ // 100 MIR_PHI
+ DF_PHI | DF_DA | DF_NULL_TRANSFER_N,
- // 101 MIR_COPY
- DF_DA | DF_UB | DF_IS_MOVE,
+ // 101 MIR_COPY
+ DF_DA | DF_UB | DF_IS_MOVE,
- // 102 MIR_FUSED_CMPL_FLOAT
- DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // 102 MIR_FUSED_CMPL_FLOAT
+ DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // 103 MIR_FUSED_CMPG_FLOAT
- DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+ // 103 MIR_FUSED_CMPG_FLOAT
+ DF_UA | DF_UB | DF_FP_A | DF_FP_B,
- // 104 MIR_FUSED_CMPL_DOUBLE
- DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // 104 MIR_FUSED_CMPL_DOUBLE
+ DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // 105 MIR_FUSED_CMPG_DOUBLE
- DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
+ // 105 MIR_FUSED_CMPG_DOUBLE
+ DF_UA_WIDE | DF_UB_WIDE | DF_FP_A | DF_FP_B,
- // 106 MIR_FUSED_CMP_LONG
- DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
+ // 106 MIR_FUSED_CMP_LONG
+ DF_UA_WIDE | DF_UB_WIDE | DF_CORE_A | DF_CORE_B,
- // 107 MIR_NOP
- DF_NOP,
+ // 107 MIR_NOP
+ DF_NOP,
- // 108 MIR_NULL_RANGE_UP_CHECK
- 0,
+ // 108 MIR_NULL_RANGE_UP_CHECK
+ 0,
- // 109 MIR_NULL_RANGE_DOWN_CHECK
- 0,
+ // 109 MIR_NULL_RANGE_DOWN_CHECK
+ 0,
- // 110 MIR_LOWER_BOUND
- 0,
+ // 110 MIR_LOWER_BOUND
+ 0,
};
/* Return the base virtual register for a SSA name */
int SRegToVReg(const CompilationUnit* cUnit, int ssaReg)
{
- DCHECK_LT(ssaReg, (int)cUnit->ssaBaseVRegs->numUsed);
- return GET_ELEM_N(cUnit->ssaBaseVRegs, int, ssaReg);
+ DCHECK_LT(ssaReg, (int)cUnit->ssaBaseVRegs->numUsed);
+ return GET_ELEM_N(cUnit->ssaBaseVRegs, int, ssaReg);
}
int SRegToSubscript(const CompilationUnit* cUnit, int ssaReg)
{
- DCHECK(ssaReg < (int)cUnit->ssaSubscripts->numUsed);
- return GET_ELEM_N(cUnit->ssaSubscripts, int, ssaReg);
+ DCHECK(ssaReg < (int)cUnit->ssaSubscripts->numUsed);
+ return GET_ELEM_N(cUnit->ssaSubscripts, int, ssaReg);
}
int getSSAUseCount(CompilationUnit* cUnit, int sReg)
{
- DCHECK(sReg < (int)cUnit->rawUseCounts.numUsed);
- return cUnit->rawUseCounts.elemList[sReg];
+ DCHECK(sReg < (int)cUnit->rawUseCounts.numUsed);
+ return cUnit->rawUseCounts.elemList[sReg];
}
char* oatGetDalvikDisassembly(CompilationUnit* cUnit,
const DecodedInstruction& insn, const char* note)
{
- char buffer[256];
- Instruction::Code opcode = insn.opcode;
- int dfAttributes = oatDataFlowAttributes[opcode];
- int flags;
- char* ret;
+ char buffer[256];
+ Instruction::Code opcode = insn.opcode;
+ int dfAttributes = oatDataFlowAttributes[opcode];
+ int flags;
+ char* ret;
- buffer[0] = 0;
- if ((int)opcode >= (int)kMirOpFirst) {
- if ((int)opcode == (int)kMirOpPhi) {
- strcpy(buffer, "PHI");
- } else {
- sprintf(buffer, "Opcode %#x", opcode);
- }
- flags = 0;
+ buffer[0] = 0;
+ if ((int)opcode >= (int)kMirOpFirst) {
+ if ((int)opcode == (int)kMirOpPhi) {
+ strcpy(buffer, "PHI");
} else {
- strcpy(buffer, Instruction::Name(opcode));
- flags = Instruction::Flags(opcode);
+ sprintf(buffer, "Opcode %#x", opcode);
}
+ flags = 0;
+ } else {
+ strcpy(buffer, Instruction::Name(opcode));
+ flags = Instruction::Flags(opcode);
+ }
- if (note)
- strcat(buffer, note);
+ if (note)
+ strcat(buffer, note);
- /* For branches, decode the instructions to print out the branch targets */
- if (flags & Instruction::kBranch) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn.opcode);
- int offset = 0;
- switch (dalvikFormat) {
- case Instruction::k21t:
- snprintf(buffer + strlen(buffer), 256, " v%d,", insn.vA);
- offset = (int) insn.vB;
- break;
- case Instruction::k22t:
- snprintf(buffer + strlen(buffer), 256, " v%d, v%d,", insn.vA, insn.vB);
- offset = (int) insn.vC;
- break;
- case Instruction::k10t:
- case Instruction::k20t:
- case Instruction::k30t:
- offset = (int) insn.vA;
- break;
- default:
- LOG(FATAL) << "Unexpected branch format " << (int)dalvikFormat
- << " / opcode " << (int)opcode;
- }
- snprintf(buffer + strlen(buffer), 256, " (%c%x)",
- offset > 0 ? '+' : '-',
- offset > 0 ? offset : -offset);
- } else if (dfAttributes & DF_FORMAT_35C) {
- unsigned int i;
- for (i = 0; i < insn.vA; i++) {
- if (i != 0) strcat(buffer, ",");
- snprintf(buffer + strlen(buffer), 256, " v%d", insn.arg[i]);
- }
+ /* For branches, decode the instructions to print out the branch targets */
+ if (flags & Instruction::kBranch) {
+ Instruction::Format dalvikFormat = Instruction::FormatOf(insn.opcode);
+ int offset = 0;
+ switch (dalvikFormat) {
+ case Instruction::k21t:
+ snprintf(buffer + strlen(buffer), 256, " v%d,", insn.vA);
+ offset = (int) insn.vB;
+ break;
+ case Instruction::k22t:
+ snprintf(buffer + strlen(buffer), 256, " v%d, v%d,", insn.vA, insn.vB);
+ offset = (int) insn.vC;
+ break;
+ case Instruction::k10t:
+ case Instruction::k20t:
+ case Instruction::k30t:
+ offset = (int) insn.vA;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch format " << (int)dalvikFormat
+ << " / opcode " << (int)opcode;
}
- else if (dfAttributes & DF_FORMAT_3RC) {
- snprintf(buffer + strlen(buffer), 256,
- " v%d..v%d", insn.vC, insn.vC + insn.vA - 1);
+ snprintf(buffer + strlen(buffer), 256, " (%c%x)",
+ offset > 0 ? '+' : '-',
+ offset > 0 ? offset : -offset);
+ } else if (dfAttributes & DF_FORMAT_35C) {
+ unsigned int i;
+ for (i = 0; i < insn.vA; i++) {
+ if (i != 0) strcat(buffer, ",");
+ snprintf(buffer + strlen(buffer), 256, " v%d", insn.arg[i]);
}
- else {
- if (dfAttributes & DF_A_IS_REG) {
- snprintf(buffer + strlen(buffer), 256, " v%d", insn.vA);
- }
- if (dfAttributes & DF_B_IS_REG) {
- snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vB);
- }
- else if ((int)opcode < (int)kMirOpFirst) {
- snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vB);
- }
- if (dfAttributes & DF_C_IS_REG) {
- snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vC);
- }
- else if ((int)opcode < (int)kMirOpFirst) {
- snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vC);
- }
+ }
+ else if (dfAttributes & DF_FORMAT_3RC) {
+ snprintf(buffer + strlen(buffer), 256,
+ " v%d..v%d", insn.vC, insn.vC + insn.vA - 1);
+ } else {
+ if (dfAttributes & DF_A_IS_REG) {
+ snprintf(buffer + strlen(buffer), 256, " v%d", insn.vA);
}
- int length = strlen(buffer) + 1;
- ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
- memcpy(ret, buffer, length);
- return ret;
+ if (dfAttributes & DF_B_IS_REG) {
+ snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vB);
+ } else if ((int)opcode < (int)kMirOpFirst) {
+ snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vB);
+ }
+ if (dfAttributes & DF_C_IS_REG) {
+ snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vC);
+ } else if ((int)opcode < (int)kMirOpFirst) {
+ snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vC);
+ }
+ }
+ int length = strlen(buffer) + 1;
+ ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
+ memcpy(ret, buffer, length);
+ return ret;
}
char* getSSAName(const CompilationUnit* cUnit, int ssaReg, char* name)
{
- sprintf(name, "v%d_%d", SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg));
- return name;
+ sprintf(name, "v%d_%d", SRegToVReg(cUnit, ssaReg),
+ SRegToSubscript(cUnit, ssaReg));
+ return name;
}
/*
@@ -949,156 +946,154 @@
*/
char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir)
{
- char buffer[256];
- char operand0[32], operand1[32];
- const DecodedInstruction* insn = &mir->dalvikInsn;
- Instruction::Code opcode = insn->opcode;
- int dfAttributes = oatDataFlowAttributes[opcode];
- char* ret;
- int length;
+ char buffer[256];
+ char operand0[32], operand1[32];
+ const DecodedInstruction* insn = &mir->dalvikInsn;
+ Instruction::Code opcode = insn->opcode;
+ int dfAttributes = oatDataFlowAttributes[opcode];
+ char* ret;
+ int length;
- buffer[0] = 0;
- if (static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst)) {
- if (static_cast<int>(opcode) == static_cast<int>(kMirOpPhi)) {
- snprintf(buffer, 256, "PHI %s = (%s",
- getSSAName(cUnit, mir->ssaRep->defs[0], operand0),
- getSSAName(cUnit, mir->ssaRep->uses[0], operand1));
- int i;
- for (i = 1; i < mir->ssaRep->numUses; i++) {
- snprintf(buffer + strlen(buffer), 256, ", %s",
- getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
- }
- snprintf(buffer + strlen(buffer), 256, ")");
- }
- else {
- sprintf(buffer, "Opcode %#x", opcode);
- }
- goto done;
+ buffer[0] = 0;
+ if (static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst)) {
+ if (static_cast<int>(opcode) == static_cast<int>(kMirOpPhi)) {
+ snprintf(buffer, 256, "PHI %s = (%s",
+ getSSAName(cUnit, mir->ssaRep->defs[0], operand0),
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand1));
+ int i;
+ for (i = 1; i < mir->ssaRep->numUses; i++) {
+ snprintf(buffer + strlen(buffer), 256, ", %s",
+ getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
+ }
+ snprintf(buffer + strlen(buffer), 256, ")");
} else {
- strcpy(buffer, Instruction::Name(opcode));
+ sprintf(buffer, "Opcode %#x", opcode);
}
+ goto done;
+ } else {
+ strcpy(buffer, Instruction::Name(opcode));
+ }
- /* For branches, decode the instructions to print out the branch targets */
- if (Instruction::Flags(insn->opcode) & Instruction::kBranch) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn->opcode);
- int delta = 0;
+ /* For branches, decode the instructions to print out the branch targets */
+ if (Instruction::Flags(insn->opcode) & Instruction::kBranch) {
+ Instruction::Format dalvikFormat = Instruction::FormatOf(insn->opcode);
+ int delta = 0;
+ switch (dalvikFormat) {
+ case Instruction::k21t:
+ snprintf(buffer + strlen(buffer), 256, " %s, ",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
+ delta = (int) insn->vB;
+ break;
+ case Instruction::k22t:
+ snprintf(buffer + strlen(buffer), 256, " %s, %s, ",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0),
+ getSSAName(cUnit, mir->ssaRep->uses[1], operand1));
+ delta = (int) insn->vC;
+ break;
+ case Instruction::k10t:
+ case Instruction::k20t:
+ case Instruction::k30t:
+ delta = (int) insn->vA;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch format: " << (int)dalvikFormat;
+ }
+ snprintf(buffer + strlen(buffer), 256, " %04x",
+ mir->offset + delta);
+ } else if (dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
+ unsigned int i;
+ for (i = 0; i < insn->vA; i++) {
+ if (i != 0) strcat(buffer, ",");
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
+ }
+ } else {
+ int udIdx;
+ if (mir->ssaRep->numDefs) {
+
+ for (udIdx = 0; udIdx < mir->ssaRep->numDefs; udIdx++) {
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->defs[udIdx], operand0));
+ }
+ strcat(buffer, ",");
+ }
+ if (mir->ssaRep->numUses) {
+ /* No leading ',' for the first use */
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
+ for (udIdx = 1; udIdx < mir->ssaRep->numUses; udIdx++) {
+ snprintf(buffer + strlen(buffer), 256, ", %s",
+ getSSAName(cUnit, mir->ssaRep->uses[udIdx], operand0));
+ }
+ }
+ if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
+ Instruction::Format dalvikFormat = Instruction::FormatOf(opcode);
switch (dalvikFormat) {
- case Instruction::k21t:
- snprintf(buffer + strlen(buffer), 256, " %s, ",
- getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
- delta = (int) insn->vB;
- break;
- case Instruction::k22t:
- snprintf(buffer + strlen(buffer), 256, " %s, %s, ",
- getSSAName(cUnit, mir->ssaRep->uses[0], operand0),
- getSSAName(cUnit, mir->ssaRep->uses[1], operand1));
- delta = (int) insn->vC;
- break;
- case Instruction::k10t:
- case Instruction::k20t:
- case Instruction::k30t:
- delta = (int) insn->vA;
- break;
- default:
- LOG(FATAL) << "Unexpected branch format: " <<
- (int)dalvikFormat;
+ case Instruction::k11n: // op vA, #+B
+ case Instruction::k21s: // op vAA, #+BBBB
+ case Instruction::k21h: // op vAA, #+BBBB00000[00000000]
+ case Instruction::k31i: // op vAA, #+BBBBBBBB
+ case Instruction::k51l: // op vAA, #+BBBBBBBBBBBBBBBB
+ snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vB);
+ break;
+ case Instruction::k21c: // op vAA, thing@BBBB
+ case Instruction::k31c: // op vAA, thing@BBBBBBBB
+ snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vB);
+ break;
+ case Instruction::k22b: // op vAA, vBB, #+CC
+ case Instruction::k22s: // op vA, vB, #+CCCC
+ snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vC);
+ break;
+ case Instruction::k22c: // op vA, vB, thing@CCCC
+ snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vC);
+ break;
+ /* No need for special printing */
+ default:
+ break;
}
- snprintf(buffer + strlen(buffer), 256, " %04x",
- mir->offset + delta);
- } else if (dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
- unsigned int i;
- for (i = 0; i < insn->vA; i++) {
- if (i != 0) strcat(buffer, ",");
- snprintf(buffer + strlen(buffer), 256, " %s",
- getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
- }
- } else {
- int udIdx;
- if (mir->ssaRep->numDefs) {
-
- for (udIdx = 0; udIdx < mir->ssaRep->numDefs; udIdx++) {
- snprintf(buffer + strlen(buffer), 256, " %s",
- getSSAName(cUnit, mir->ssaRep->defs[udIdx], operand0));
- }
- strcat(buffer, ",");
- }
- if (mir->ssaRep->numUses) {
- /* No leading ',' for the first use */
- snprintf(buffer + strlen(buffer), 256, " %s",
- getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
- for (udIdx = 1; udIdx < mir->ssaRep->numUses; udIdx++) {
- snprintf(buffer + strlen(buffer), 256, ", %s",
- getSSAName(cUnit, mir->ssaRep->uses[udIdx], operand0));
- }
- }
- if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(opcode);
- switch (dalvikFormat) {
- case Instruction::k11n: // op vA, #+B
- case Instruction::k21s: // op vAA, #+BBBB
- case Instruction::k21h: // op vAA, #+BBBB00000[00000000]
- case Instruction::k31i: // op vAA, #+BBBBBBBB
- case Instruction::k51l: // op vAA, #+BBBBBBBBBBBBBBBB
- snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vB);
- break;
- case Instruction::k21c: // op vAA, thing@BBBB
- case Instruction::k31c: // op vAA, thing@BBBBBBBB
- snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vB);
- break;
- case Instruction::k22b: // op vAA, vBB, #+CC
- case Instruction::k22s: // op vA, vB, #+CCCC
- snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vC);
- break;
- case Instruction::k22c: // op vA, vB, thing@CCCC
- snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vC);
- break;
- /* No need for special printing */
- default:
- break;
- }
- }
- }
+ }
+ }
done:
- length = strlen(buffer) + 1;
- ret = (char*) oatNew(cUnit, length, false, kAllocDFInfo);
- memcpy(ret, buffer, length);
- return ret;
+ length = strlen(buffer) + 1;
+ ret = (char*) oatNew(cUnit, length, false, kAllocDFInfo);
+ memcpy(ret, buffer, length);
+ return ret;
}
char* oatGetSSAString(CompilationUnit* cUnit, SSARepresentation* ssaRep)
{
- char buffer[256];
- char* ret;
- int i;
+ char buffer[256];
+ char* ret;
+ int i;
- buffer[0] = 0;
- for (i = 0; i < ssaRep->numDefs; i++) {
- int ssaReg = ssaRep->defs[i];
- sprintf(buffer + strlen(buffer), "s%d(v%d_%d) ", ssaReg,
- SRegToVReg(cUnit, ssaReg), SRegToSubscript(cUnit, ssaReg));
+ buffer[0] = 0;
+ for (i = 0; i < ssaRep->numDefs; i++) {
+ int ssaReg = ssaRep->defs[i];
+ sprintf(buffer + strlen(buffer), "s%d(v%d_%d) ", ssaReg,
+ SRegToVReg(cUnit, ssaReg), SRegToSubscript(cUnit, ssaReg));
+ }
+
+ if (ssaRep->numDefs) {
+ strcat(buffer, "<- ");
+ }
+
+ for (i = 0; i < ssaRep->numUses; i++) {
+ int len = strlen(buffer);
+ int ssaReg = ssaRep->uses[i];
+
+ if (snprintf(buffer + len, 250 - len, "s%d(v%d_%d) ", ssaReg,
+ SRegToVReg(cUnit, ssaReg),
+ SRegToSubscript(cUnit, ssaReg))) {
+ strcat(buffer, "...");
+ break;
}
+ }
- if (ssaRep->numDefs) {
- strcat(buffer, "<- ");
- }
-
- for (i = 0; i < ssaRep->numUses; i++) {
- int len = strlen(buffer);
- int ssaReg = ssaRep->uses[i];
-
- if (snprintf(buffer + len, 250 - len, "s%d(v%d_%d) ", ssaReg,
- SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg))) {
- strcat(buffer, "...");
- break;
- }
- }
-
- int length = strlen(buffer) + 1;
- ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
- memcpy(ret, buffer, length);
- return ret;
+ int length = strlen(buffer) + 1;
+ ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
+ memcpy(ret, buffer, length);
+ return ret;
}
/* Any register that is used before being defined is considered live-in */
@@ -1106,17 +1101,17 @@
ArenaBitVector* defV, ArenaBitVector* liveInV,
int dalvikRegId)
{
- oatSetBit(cUnit, useV, dalvikRegId);
- if (!oatIsBitSet(defV, dalvikRegId)) {
- oatSetBit(cUnit, liveInV, dalvikRegId);
- }
+ oatSetBit(cUnit, useV, dalvikRegId);
+ if (!oatIsBitSet(defV, dalvikRegId)) {
+ oatSetBit(cUnit, liveInV, dalvikRegId);
+ }
}
/* Mark a reg as being defined */
inline void handleDef(CompilationUnit* cUnit, ArenaBitVector* defV,
int dalvikRegId)
{
- oatSetBit(cUnit, defV, dalvikRegId);
+ oatSetBit(cUnit, defV, dalvikRegId);
}
/*
@@ -1125,567 +1120,556 @@
*/
bool oatFindLocalLiveIn(CompilationUnit* cUnit, BasicBlock* bb)
{
- MIR* mir;
- ArenaBitVector *useV, *defV, *liveInV;
+ MIR* mir;
+ ArenaBitVector *useV, *defV, *liveInV;
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->dataFlowInfo == NULL) return false;
- useV = bb->dataFlowInfo->useV =
- oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapUse);
- defV = bb->dataFlowInfo->defV =
- oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapDef);
- liveInV = bb->dataFlowInfo->liveInV =
- oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
- kBitMapLiveIn);
+ useV = bb->dataFlowInfo->useV =
+ oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapUse);
+ defV = bb->dataFlowInfo->defV =
+ oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false, kBitMapDef);
+ liveInV = bb->dataFlowInfo->liveInV =
+ oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
+ kBitMapLiveIn);
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- int dfAttributes =
- oatDataFlowAttributes[mir->dalvikInsn.opcode];
- DecodedInstruction *dInsn = &mir->dalvikInsn;
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
- if (dfAttributes & DF_HAS_USES) {
- if (dfAttributes & DF_UA) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA);
- } else if (dfAttributes & DF_UA_WIDE) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA);
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA+1);
- }
- if (dfAttributes & DF_UB) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB);
- } else if (dfAttributes & DF_UB_WIDE) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB);
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB+1);
- }
- if (dfAttributes & DF_UC) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC);
- } else if (dfAttributes & DF_UC_WIDE) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC);
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+1);
- }
- }
- if (dfAttributes & DF_FORMAT_35C) {
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->arg[i]);
- }
- }
- if (dfAttributes & DF_FORMAT_3RC) {
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+i);
- }
- }
- if (dfAttributes & DF_HAS_DEFS) {
- handleDef(cUnit, defV, dInsn->vA);
- if (dfAttributes & DF_DA_WIDE) {
- handleDef(cUnit, defV, dInsn->vA+1);
- }
- }
+ if (dfAttributes & DF_HAS_USES) {
+ if (dfAttributes & DF_UA) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA);
+ } else if (dfAttributes & DF_UA_WIDE) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA);
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vA+1);
+ }
+ if (dfAttributes & DF_UB) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB);
+ } else if (dfAttributes & DF_UB_WIDE) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB);
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vB+1);
+ }
+ if (dfAttributes & DF_UC) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC);
+ } else if (dfAttributes & DF_UC_WIDE) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC);
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+1);
+ }
}
- return true;
+ if (dfAttributes & DF_FORMAT_35C) {
+ for (unsigned int i = 0; i < dInsn->vA; i++) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->arg[i]);
+ }
+ }
+ if (dfAttributes & DF_FORMAT_3RC) {
+ for (unsigned int i = 0; i < dInsn->vA; i++) {
+ handleLiveInUse(cUnit, useV, defV, liveInV, dInsn->vC+i);
+ }
+ }
+ if (dfAttributes & DF_HAS_DEFS) {
+ handleDef(cUnit, defV, dInsn->vA);
+ if (dfAttributes & DF_DA_WIDE) {
+ handleDef(cUnit, defV, dInsn->vA+1);
+ }
+ }
+ }
+ return true;
}
int addNewSReg(CompilationUnit* cUnit, int vReg)
{
- // Compiler temps always have a subscript of 0
- int subscript = (vReg < 0) ? 0 : ++cUnit->SSALastDefs[vReg];
- int ssaReg = cUnit->numSSARegs++;
- oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, vReg);
- oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, subscript);
- DCHECK_EQ(cUnit->ssaBaseVRegs->numUsed, cUnit->ssaSubscripts->numUsed);
- return ssaReg;
+ // Compiler temps always have a subscript of 0
+ int subscript = (vReg < 0) ? 0 : ++cUnit->SSALastDefs[vReg];
+ int ssaReg = cUnit->numSSARegs++;
+ oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, vReg);
+ oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, subscript);
+ DCHECK_EQ(cUnit->ssaBaseVRegs->numUsed, cUnit->ssaSubscripts->numUsed);
+ return ssaReg;
}
/* Find out the latest SSA register for a given Dalvik register */
void handleSSAUse(CompilationUnit* cUnit, int* uses, int dalvikReg,
int regIndex)
{
- DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
- uses[regIndex] = cUnit->vRegToSSAMap[dalvikReg];
+ DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
+ uses[regIndex] = cUnit->vRegToSSAMap[dalvikReg];
}
/* Setup a new SSA register for a given Dalvik register */
void handleSSADef(CompilationUnit* cUnit, int* defs, int dalvikReg,
int regIndex)
{
- DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
- int ssaReg = addNewSReg(cUnit, dalvikReg);
- cUnit->vRegToSSAMap[dalvikReg] = ssaReg;
- defs[regIndex] = ssaReg;
+ DCHECK((dalvikReg >= 0) && (dalvikReg < cUnit->numDalvikRegisters));
+ int ssaReg = addNewSReg(cUnit, dalvikReg);
+ cUnit->vRegToSSAMap[dalvikReg] = ssaReg;
+ defs[regIndex] = ssaReg;
}
/* Look up new SSA names for format_35c instructions */
void dataFlowSSAFormat35C(CompilationUnit* cUnit, MIR* mir)
{
- DecodedInstruction *dInsn = &mir->dalvikInsn;
- int numUses = dInsn->vA;
- int i;
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
+ int numUses = dInsn->vA;
+ int i;
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses, true,
+ mir->ssaRep->numUses = numUses;
+ mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses, true,
+ kAllocDFInfo);
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses, true,
kAllocDFInfo);
- // NOTE: will be filled in during type & size inference pass
- mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses, true,
- kAllocDFInfo);
- for (i = 0; i < numUses; i++) {
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->arg[i], i);
- }
+ for (i = 0; i < numUses; i++) {
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->arg[i], i);
+ }
}
/* Look up new SSA names for format_3rc instructions */
void dataFlowSSAFormat3RC(CompilationUnit* cUnit, MIR* mir)
{
- DecodedInstruction *dInsn = &mir->dalvikInsn;
- int numUses = dInsn->vA;
- int i;
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
+ int numUses = dInsn->vA;
+ int i;
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses, true,
+ mir->ssaRep->numUses = numUses;
+ mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses, true,
+ kAllocDFInfo);
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses, true,
kAllocDFInfo);
- // NOTE: will be filled in during type & size inference pass
- mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses, true,
- kAllocDFInfo);
- for (i = 0; i < numUses; i++) {
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+i, i);
- }
+ for (i = 0; i < numUses; i++) {
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+i, i);
+ }
}
/* Entry function to convert a block into SSA representation */
bool oatDoSSAConversion(CompilationUnit* cUnit, BasicBlock* bb)
{
- MIR* mir;
+ MIR* mir;
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->dataFlowInfo == NULL) return false;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- mir->ssaRep = (struct SSARepresentation *)
- oatNew(cUnit, sizeof(SSARepresentation), true, kAllocDFInfo);
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ mir->ssaRep = (struct SSARepresentation *)
+ oatNew(cUnit, sizeof(SSARepresentation), true, kAllocDFInfo);
- int dfAttributes =
- oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- // If not a pseudo-op, note non-leaf or can throw
- if (static_cast<int>(mir->dalvikInsn.opcode) < static_cast<int>(kNumPackedOpcodes)) {
- int flags = Instruction::Flags(mir->dalvikInsn.opcode);
+ // If not a pseudo-op, note non-leaf or can throw
+ if (static_cast<int>(mir->dalvikInsn.opcode) <
+ static_cast<int>(kNumPackedOpcodes)) {
+ int flags = Instruction::Flags(mir->dalvikInsn.opcode);
- if (flags & Instruction::kThrow) {
- cUnit->attrs &= ~METHOD_IS_THROW_FREE;
- }
+ if (flags & Instruction::kThrow) {
+ cUnit->attrs &= ~METHOD_IS_THROW_FREE;
+ }
- if (flags & Instruction::kInvoke) {
- cUnit->attrs &= ~METHOD_IS_LEAF;
- }
- }
-
- int numUses = 0;
-
- if (dfAttributes & DF_FORMAT_35C) {
- dataFlowSSAFormat35C(cUnit, mir);
- continue;
- }
-
- if (dfAttributes & DF_FORMAT_3RC) {
- dataFlowSSAFormat3RC(cUnit, mir);
- continue;
- }
-
- if (dfAttributes & DF_HAS_USES) {
- if (dfAttributes & DF_UA) {
- numUses++;
- } else if (dfAttributes & DF_UA_WIDE) {
- numUses += 2;
- }
- if (dfAttributes & DF_UB) {
- numUses++;
- } else if (dfAttributes & DF_UB_WIDE) {
- numUses += 2;
- }
- if (dfAttributes & DF_UC) {
- numUses++;
- } else if (dfAttributes & DF_UC_WIDE) {
- numUses += 2;
- }
- }
-
- if (numUses) {
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses,
- false, kAllocDFInfo);
- mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses,
- false, kAllocDFInfo);
- }
-
- int numDefs = 0;
-
- if (dfAttributes & DF_HAS_DEFS) {
- numDefs++;
- if (dfAttributes & DF_DA_WIDE) {
- numDefs++;
- }
- }
-
- if (numDefs) {
- mir->ssaRep->numDefs = numDefs;
- mir->ssaRep->defs = (int *)oatNew(cUnit, sizeof(int) * numDefs,
- false, kAllocDFInfo);
- mir->ssaRep->fpDef = (bool *)oatNew(cUnit, sizeof(bool) * numDefs,
- false, kAllocDFInfo);
- }
-
- DecodedInstruction *dInsn = &mir->dalvikInsn;
-
- if (dfAttributes & DF_HAS_USES) {
- numUses = 0;
- if (dfAttributes & DF_UA) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA, numUses++);
- } else if (dfAttributes & DF_UA_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA, numUses++);
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA+1, numUses++);
- }
- if (dfAttributes & DF_UB) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB, numUses++);
- } else if (dfAttributes & DF_UB_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB, numUses++);
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB+1, numUses++);
- }
- if (dfAttributes & DF_UC) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC, numUses++);
- } else if (dfAttributes & DF_UC_WIDE) {
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC, numUses++);
- mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
- handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+1, numUses++);
- }
- }
- if (dfAttributes & DF_HAS_DEFS) {
- mir->ssaRep->fpDef[0] = dfAttributes & DF_FP_A;
- handleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA, 0);
- if (dfAttributes & DF_DA_WIDE) {
- mir->ssaRep->fpDef[1] = dfAttributes & DF_FP_A;
- handleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA+1, 1);
- }
- }
+ if (flags & Instruction::kInvoke) {
+ cUnit->attrs &= ~METHOD_IS_LEAF;
+ }
}
- if (!cUnit->disableDataflow) {
- /*
- * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
- * input to PHI nodes can be derived from the snapshot of all
- * predecessor blocks.
- */
- bb->dataFlowInfo->vRegToSSAMap =
- (int *)oatNew(cUnit, sizeof(int) * cUnit->numDalvikRegisters, false,
- kAllocDFInfo);
+ int numUses = 0;
- memcpy(bb->dataFlowInfo->vRegToSSAMap, cUnit->vRegToSSAMap,
- sizeof(int) * cUnit->numDalvikRegisters);
+ if (dfAttributes & DF_FORMAT_35C) {
+ dataFlowSSAFormat35C(cUnit, mir);
+ continue;
}
- return true;
+
+ if (dfAttributes & DF_FORMAT_3RC) {
+ dataFlowSSAFormat3RC(cUnit, mir);
+ continue;
+ }
+
+ if (dfAttributes & DF_HAS_USES) {
+ if (dfAttributes & DF_UA) {
+ numUses++;
+ } else if (dfAttributes & DF_UA_WIDE) {
+ numUses += 2;
+ }
+ if (dfAttributes & DF_UB) {
+ numUses++;
+ } else if (dfAttributes & DF_UB_WIDE) {
+ numUses += 2;
+ }
+ if (dfAttributes & DF_UC) {
+ numUses++;
+ } else if (dfAttributes & DF_UC_WIDE) {
+ numUses += 2;
+ }
+ }
+
+ if (numUses) {
+ mir->ssaRep->numUses = numUses;
+ mir->ssaRep->uses = (int *)oatNew(cUnit, sizeof(int) * numUses,
+ false, kAllocDFInfo);
+ mir->ssaRep->fpUse = (bool *)oatNew(cUnit, sizeof(bool) * numUses,
+ false, kAllocDFInfo);
+ }
+
+ int numDefs = 0;
+
+ if (dfAttributes & DF_HAS_DEFS) {
+ numDefs++;
+ if (dfAttributes & DF_DA_WIDE) {
+ numDefs++;
+ }
+ }
+
+ if (numDefs) {
+ mir->ssaRep->numDefs = numDefs;
+ mir->ssaRep->defs = (int *)oatNew(cUnit, sizeof(int) * numDefs,
+ false, kAllocDFInfo);
+ mir->ssaRep->fpDef = (bool *)oatNew(cUnit, sizeof(bool) * numDefs,
+ false, kAllocDFInfo);
+ }
+
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
+
+ if (dfAttributes & DF_HAS_USES) {
+ numUses = 0;
+ if (dfAttributes & DF_UA) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA, numUses++);
+ } else if (dfAttributes & DF_UA_WIDE) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA, numUses++);
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_A;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vA+1, numUses++);
+ }
+ if (dfAttributes & DF_UB) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB, numUses++);
+ } else if (dfAttributes & DF_UB_WIDE) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB, numUses++);
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_B;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vB+1, numUses++);
+ }
+ if (dfAttributes & DF_UC) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC, numUses++);
+ } else if (dfAttributes & DF_UC_WIDE) {
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC, numUses++);
+ mir->ssaRep->fpUse[numUses] = dfAttributes & DF_FP_C;
+ handleSSAUse(cUnit, mir->ssaRep->uses, dInsn->vC+1, numUses++);
+ }
+ }
+ if (dfAttributes & DF_HAS_DEFS) {
+ mir->ssaRep->fpDef[0] = dfAttributes & DF_FP_A;
+ handleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA, 0);
+ if (dfAttributes & DF_DA_WIDE) {
+ mir->ssaRep->fpDef[1] = dfAttributes & DF_FP_A;
+ handleSSADef(cUnit, mir->ssaRep->defs, dInsn->vA+1, 1);
+ }
+ }
+ }
+
+ if (!cUnit->disableDataflow) {
+ /*
+ * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+ * input to PHI nodes can be derived from the snapshot of all
+ * predecessor blocks.
+ */
+ bb->dataFlowInfo->vRegToSSAMap =
+ (int *)oatNew(cUnit, sizeof(int) * cUnit->numDalvikRegisters, false,
+ kAllocDFInfo);
+
+ memcpy(bb->dataFlowInfo->vRegToSSAMap, cUnit->vRegToSSAMap,
+ sizeof(int) * cUnit->numDalvikRegisters);
+ }
+ return true;
}
/* Setup a constant value for opcodes thare have the DF_SETS_CONST attribute */
void setConstant(CompilationUnit* cUnit, int ssaReg, int value)
{
- oatSetBit(cUnit, cUnit->isConstantV, ssaReg);
- cUnit->constantValues[ssaReg] = value;
+ oatSetBit(cUnit, cUnit->isConstantV, ssaReg);
+ cUnit->constantValues[ssaReg] = value;
}
bool oatDoConstantPropagation(CompilationUnit* cUnit, BasicBlock* bb)
{
- MIR* mir;
- ArenaBitVector *isConstantV = cUnit->isConstantV;
+ MIR* mir;
+ ArenaBitVector *isConstantV = cUnit->isConstantV;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- int dfAttributes =
- oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- DecodedInstruction *dInsn = &mir->dalvikInsn;
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
- if (!(dfAttributes & DF_HAS_DEFS)) continue;
+ if (!(dfAttributes & DF_HAS_DEFS)) continue;
- /* Handle instructions that set up constants directly */
- if (dfAttributes & DF_SETS_CONST) {
- if (dfAttributes & DF_DA) {
- switch (dInsn->opcode) {
- case Instruction::CONST_4:
- case Instruction::CONST_16:
- case Instruction::CONST:
- setConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
- break;
- case Instruction::CONST_HIGH16:
- setConstant(cUnit, mir->ssaRep->defs[0],
- dInsn->vB << 16);
- break;
- default:
- break;
- }
- } else if (dfAttributes & DF_DA_WIDE) {
- switch (dInsn->opcode) {
- case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32:
- setConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
- setConstant(cUnit, mir->ssaRep->defs[1], 0);
- break;
- case Instruction::CONST_WIDE:
- setConstant(cUnit, mir->ssaRep->defs[0],
- (int) dInsn->vB_wide);
- setConstant(cUnit, mir->ssaRep->defs[1],
- (int) (dInsn->vB_wide >> 32));
- break;
- case Instruction::CONST_WIDE_HIGH16:
- setConstant(cUnit, mir->ssaRep->defs[0], 0);
- setConstant(cUnit, mir->ssaRep->defs[1],
- dInsn->vB << 16);
- break;
- default:
- break;
- }
+ /* Handle instructions that set up constants directly */
+ if (dfAttributes & DF_SETS_CONST) {
+ if (dfAttributes & DF_DA) {
+ switch (dInsn->opcode) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ setConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
+ break;
+ case Instruction::CONST_HIGH16:
+ setConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB << 16);
+ break;
+ default:
+ break;
+ }
+ } else if (dfAttributes & DF_DA_WIDE) {
+ switch (dInsn->opcode) {
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ setConstant(cUnit, mir->ssaRep->defs[0], dInsn->vB);
+ setConstant(cUnit, mir->ssaRep->defs[1], 0);
+ break;
+ case Instruction::CONST_WIDE:
+ setConstant(cUnit, mir->ssaRep->defs[0], (int) dInsn->vB_wide);
+ setConstant(cUnit, mir->ssaRep->defs[1],
+ (int) (dInsn->vB_wide >> 32));
+ break;
+ case Instruction::CONST_WIDE_HIGH16:
+ setConstant(cUnit, mir->ssaRep->defs[0], 0);
+ setConstant(cUnit, mir->ssaRep->defs[1], dInsn->vB << 16);
+ break;
+ default:
+ break;
}
- /* Handle instructions that set up constants directly */
- } else if (dfAttributes & DF_IS_MOVE) {
- int i;
+ }
+      /* Propagate constants through move instructions */
+ } else if (dfAttributes & DF_IS_MOVE) {
+ int i;
- for (i = 0; i < mir->ssaRep->numUses; i++) {
- if (!oatIsBitSet(isConstantV, mir->ssaRep->uses[i])) break;
- }
- /* Move a register holding a constant to another register */
- if (i == mir->ssaRep->numUses) {
- setConstant(cUnit, mir->ssaRep->defs[0],
- cUnit->constantValues[mir->ssaRep->uses[0]]);
- if (dfAttributes & DF_DA_WIDE) {
- setConstant(cUnit, mir->ssaRep->defs[1],
- cUnit->constantValues[mir->ssaRep->uses[1]]);
- }
- }
+ for (i = 0; i < mir->ssaRep->numUses; i++) {
+ if (!oatIsBitSet(isConstantV, mir->ssaRep->uses[i])) break;
+ }
+ /* Move a register holding a constant to another register */
+ if (i == mir->ssaRep->numUses) {
+ setConstant(cUnit, mir->ssaRep->defs[0],
+ cUnit->constantValues[mir->ssaRep->uses[0]]);
+ if (dfAttributes & DF_DA_WIDE) {
+ setConstant(cUnit, mir->ssaRep->defs[1],
+ cUnit->constantValues[mir->ssaRep->uses[1]]);
}
+ }
}
- /* TODO: implement code to handle arithmetic operations */
- return true;
+ }
+ /* TODO: implement code to handle arithmetic operations */
+ return true;
}
/* Setup the basic data structures for SSA conversion */
void oatInitializeSSAConversion(CompilationUnit* cUnit)
{
- int i;
- int numDalvikReg = cUnit->numDalvikRegisters;
+ int i;
+ int numDalvikReg = cUnit->numDalvikRegisters;
- cUnit->ssaBaseVRegs = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
- false, kAllocDFInfo);
- cUnit->ssaSubscripts = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
- false, kAllocDFInfo);
- // Create the ssa mappings, estimating the max size
- oatInitGrowableList(cUnit, cUnit->ssaBaseVRegs,
- numDalvikReg + cUnit->defCount + 128,
- kListSSAtoDalvikMap);
- oatInitGrowableList(cUnit, cUnit->ssaSubscripts,
- numDalvikReg + cUnit->defCount + 128,
- kListSSAtoDalvikMap);
- /*
- * Initial number of SSA registers is equal to the number of Dalvik
- * registers.
- */
- cUnit->numSSARegs = numDalvikReg;
+ cUnit->ssaBaseVRegs = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
+ false, kAllocDFInfo);
+ cUnit->ssaSubscripts = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
+ false, kAllocDFInfo);
+ // Create the ssa mappings, estimating the max size
+ oatInitGrowableList(cUnit, cUnit->ssaBaseVRegs,
+ numDalvikReg + cUnit->defCount + 128,
+ kListSSAtoDalvikMap);
+ oatInitGrowableList(cUnit, cUnit->ssaSubscripts,
+ numDalvikReg + cUnit->defCount + 128,
+ kListSSAtoDalvikMap);
+ /*
+ * Initial number of SSA registers is equal to the number of Dalvik
+ * registers.
+ */
+ cUnit->numSSARegs = numDalvikReg;
- /*
- * Initialize the SSA2Dalvik map list. For the first numDalvikReg elements,
- * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
- * into "(0 << 16) | i"
- */
- for (i = 0; i < numDalvikReg; i++) {
- oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, i);
- oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, 0);
- }
+ /*
+ * Initialize the SSA2Dalvik map list. For the first numDalvikReg elements,
+ * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
+ * into "(0 << 16) | i"
+ */
+ for (i = 0; i < numDalvikReg; i++) {
+ oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, i);
+ oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, 0);
+ }
- /*
- * Initialize the DalvikToSSAMap map. There is one entry for each
- * Dalvik register, and the SSA names for those are the same.
- */
- cUnit->vRegToSSAMap = (int *)oatNew(cUnit, sizeof(int) * numDalvikReg,
- false, kAllocDFInfo);
- /* Keep track of the higest def for each dalvik reg */
- cUnit->SSALastDefs = (int *)oatNew(cUnit, sizeof(int) * numDalvikReg,
- false, kAllocDFInfo);
+ /*
+ * Initialize the DalvikToSSAMap map. There is one entry for each
+ * Dalvik register, and the SSA names for those are the same.
+ */
+ cUnit->vRegToSSAMap = (int *)oatNew(cUnit, sizeof(int) * numDalvikReg,
+ false, kAllocDFInfo);
+ /* Keep track of the higest def for each dalvik reg */
+ cUnit->SSALastDefs = (int *)oatNew(cUnit, sizeof(int) * numDalvikReg,
+ false, kAllocDFInfo);
- for (i = 0; i < numDalvikReg; i++) {
- cUnit->vRegToSSAMap[i] = i;
- cUnit->SSALastDefs[i] = 0;
- }
+ for (i = 0; i < numDalvikReg; i++) {
+ cUnit->vRegToSSAMap[i] = i;
+ cUnit->SSALastDefs[i] = 0;
+ }
- /* Add ssa reg for Method* */
- cUnit->methodSReg = addNewSReg(cUnit, SSA_METHOD_BASEREG);
+ /* Add ssa reg for Method* */
+ cUnit->methodSReg = addNewSReg(cUnit, SSA_METHOD_BASEREG);
- /*
- * Allocate the BasicBlockDataFlow structure for the entry and code blocks
- */
- GrowableListIterator iterator;
+ /*
+ * Allocate the BasicBlockDataFlow structure for the entry and code blocks
+ */
+ GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
+ oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
- while (true) {
- BasicBlock* bb = (BasicBlock *) oatGrowableListIteratorNext(&iterator);
- if (bb == NULL) break;
- if (bb->hidden == true) continue;
- if (bb->blockType == kDalvikByteCode ||
- bb->blockType == kEntryBlock ||
- bb->blockType == kExitBlock) {
- bb->dataFlowInfo = (BasicBlockDataFlow *)
- oatNew(cUnit, sizeof(BasicBlockDataFlow),
- true, kAllocDFInfo);
- }
- }
+ while (true) {
+ BasicBlock* bb = (BasicBlock *) oatGrowableListIteratorNext(&iterator);
+ if (bb == NULL) break;
+ if (bb->hidden == true) continue;
+ if (bb->blockType == kDalvikByteCode ||
+ bb->blockType == kEntryBlock ||
+ bb->blockType == kExitBlock) {
+ bb->dataFlowInfo = (BasicBlockDataFlow *)
+ oatNew(cUnit, sizeof(BasicBlockDataFlow), true, kAllocDFInfo);
+ }
+ }
}
/* Clear the visited flag for each BB */
bool oatClearVisitedFlag(struct CompilationUnit* cUnit, struct BasicBlock* bb)
{
- bb->visited = false;
- return true;
+ bb->visited = false;
+ return true;
}
void oatDataFlowAnalysisDispatcher(CompilationUnit* cUnit,
- bool (*func)(CompilationUnit*, BasicBlock*),
- DataFlowAnalysisMode dfaMode,
- bool isIterative)
+ bool (*func)(CompilationUnit*, BasicBlock*),
+ DataFlowAnalysisMode dfaMode,
+ bool isIterative)
{
- bool change = true;
+ bool change = true;
- while (change) {
- change = false;
+ while (change) {
+ change = false;
- switch (dfaMode) {
- /* Scan all blocks and perform the operations specified in func */
- case kAllNodes:
- {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
- while (true) {
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListIteratorNext(&iterator);
- if (bb == NULL) break;
- if (bb->hidden == true) continue;
- change |= (*func)(cUnit, bb);
- }
- }
- break;
- /* Scan reachable blocks and perform the ops specified in func. */
- case kReachableNodes:
- {
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int blockIdx = cUnit->dfsOrder.elemList[idx];
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListGetElement(blockList,
- blockIdx);
- change |= (*func)(cUnit, bb);
- }
- }
- break;
-
- /* Scan reachable blocks by pre-order dfs and invoke func on each. */
- case kPreOrderDFSTraversal:
- {
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int dfsIdx = cUnit->dfsOrder.elemList[idx];
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListGetElement(blockList,
- dfsIdx);
- change |= (*func)(cUnit, bb);
- }
- }
- break;
- /* Scan reachable blocks post-order dfs and invoke func on each. */
- case kPostOrderDFSTraversal:
- {
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
- int dfsIdx = cUnit->dfsOrder.elemList[idx];
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListGetElement(blockList,
- dfsIdx);
- change |= (*func)(cUnit, bb);
- }
- }
- break;
- /* Scan reachable post-order dom tree and invoke func on each. */
- case kPostOrderDOMTraversal:
- {
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int domIdx = cUnit->domPostOrderTraversal.elemList[idx];
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListGetElement(blockList,
- domIdx);
- change |= (*func)(cUnit, bb);
- }
- }
- break;
- /* Scan reachable blocks reverse post-order dfs, invoke func on each */
- case kReversePostOrderTraversal:
- {
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
- int revIdx = cUnit->dfsPostOrder.elemList[idx];
- BasicBlock* bb =
- (BasicBlock *) oatGrowableListGetElement(blockList,
- revIdx);
- change |= (*func)(cUnit, bb);
- }
- }
- break;
- default:
- LOG(FATAL) << "Unknown traversal mode " << (int)dfaMode;
+ switch (dfaMode) {
+ /* Scan all blocks and perform the operations specified in func */
+ case kAllNodes:
+ {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
+ while (true) {
+ BasicBlock* bb =
+ (BasicBlock *) oatGrowableListIteratorNext(&iterator);
+ if (bb == NULL) break;
+ if (bb->hidden == true) continue;
+ change |= (*func)(cUnit, bb);
+ }
}
- /* If isIterative is false, exit the loop after the first iteration */
- change &= isIterative;
+ break;
+ /* Scan reachable blocks and perform the ops specified in func. */
+ case kReachableNodes:
+ {
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = 0; idx < numReachableBlocks; idx++) {
+ int blockIdx = cUnit->dfsOrder.elemList[idx];
+ BasicBlock* bb = (BasicBlock *)
+ oatGrowableListGetElement(blockList, blockIdx);
+ change |= (*func)(cUnit, bb);
+ }
+ }
+ break;
+
+ /* Scan reachable blocks by pre-order dfs and invoke func on each. */
+ case kPreOrderDFSTraversal:
+ {
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = 0; idx < numReachableBlocks; idx++) {
+ int dfsIdx = cUnit->dfsOrder.elemList[idx];
+ BasicBlock* bb = (BasicBlock *)
+ oatGrowableListGetElement(blockList, dfsIdx);
+ change |= (*func)(cUnit, bb);
+ }
+ }
+ break;
+ /* Scan reachable blocks post-order dfs and invoke func on each. */
+ case kPostOrderDFSTraversal:
+ {
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
+ int dfsIdx = cUnit->dfsOrder.elemList[idx];
+ BasicBlock* bb = (BasicBlock *)
+ oatGrowableListGetElement(blockList, dfsIdx);
+ change |= (*func)(cUnit, bb);
+ }
+ }
+ break;
+ /* Scan reachable post-order dom tree and invoke func on each. */
+ case kPostOrderDOMTraversal:
+ {
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = 0; idx < numReachableBlocks; idx++) {
+ int domIdx = cUnit->domPostOrderTraversal.elemList[idx];
+ BasicBlock* bb = (BasicBlock *)
+ oatGrowableListGetElement(blockList, domIdx);
+ change |= (*func)(cUnit, bb);
+ }
+ }
+ break;
+ /* Scan reachable blocks reverse post-order dfs, invoke func on each */
+ case kReversePostOrderTraversal:
+ {
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = numReachableBlocks - 1; idx >= 0; idx--) {
+ int revIdx = cUnit->dfsPostOrder.elemList[idx];
+ BasicBlock* bb = (BasicBlock *)
+ oatGrowableListGetElement(blockList, revIdx);
+ change |= (*func)(cUnit, bb);
+ }
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unknown traversal mode " << (int)dfaMode;
}
+ /* If isIterative is false, exit the loop after the first iteration */
+ change &= isIterative;
+ }
}
/* Advance to next strictly dominated MIR node in an extended basic block */
-MIR* advanceMIR(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir, ArenaBitVector* bv,
- bool clearMark) {
- BasicBlock* bb = *pBb;
- if (mir != NULL) {
- mir = mir->next;
- if (mir == NULL) {
- bb = bb->fallThrough;
- if ((bb == NULL) || bb->predecessors->numUsed != 1) {
- mir = NULL;
- } else {
- if (bv) {
- oatSetBit(cUnit, bv, bb->id);
- }
- *pBb = bb;
- mir = bb->firstMIRInsn;
- }
+MIR* advanceMIR(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir,
+ ArenaBitVector* bv, bool clearMark) {
+ BasicBlock* bb = *pBb;
+ if (mir != NULL) {
+ mir = mir->next;
+ if (mir == NULL) {
+ bb = bb->fallThrough;
+ if ((bb == NULL) || bb->predecessors->numUsed != 1) {
+ mir = NULL;
+ } else {
+ if (bv) {
+ oatSetBit(cUnit, bv, bb->id);
}
+ *pBb = bb;
+ mir = bb->firstMIRInsn;
+ }
}
- if (mir && clearMark) {
- mir->optimizationFlags &= ~MIR_MARK;
- }
- return mir;
+ }
+ if (mir && clearMark) {
+ mir->optimizationFlags &= ~MIR_MARK;
+ }
+ return mir;
}
/*
@@ -1696,563 +1680,562 @@
* used, a move-result may not be present.
*/
MIR* oatFindMoveResult(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- bool wide)
+ bool wide)
{
- BasicBlock* tbb = bb;
- mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
- while (mir != NULL) {
- if (!wide && mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
- break;
- }
- if (wide && mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE) {
- break;
- }
- // Keep going if pseudo op, otherwise terminate
- if (mir->dalvikInsn.opcode < static_cast<Instruction::Code>(kNumPackedOpcodes)) {
- mir = NULL;
- } else {
- mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
- }
+ BasicBlock* tbb = bb;
+ mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
+ while (mir != NULL) {
+ if (!wide && mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
+ break;
}
- return mir;
+ if (wide && mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE) {
+ break;
+ }
+ // Keep going if pseudo op, otherwise terminate
+ if (mir->dalvikInsn.opcode <
+ static_cast<Instruction::Code>(kNumPackedOpcodes)) {
+ mir = NULL;
+ } else {
+ mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
+ }
+ }
+ return mir;
}
void squashDupRangeChecks(CompilationUnit* cUnit, BasicBlock** pBp, MIR* mir,
- int arraySreg, int indexSreg)
+ int arraySreg, int indexSreg)
{
- while (true) {
- mir = advanceMIR(cUnit, pBp, mir, NULL, false);
- if (!mir) {
- break;
- }
- if ((mir->ssaRep == NULL) ||
- (mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- continue;
- }
- int checkArray = INVALID_SREG;
- int checkIndex = INVALID_SREG;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::AGET:
- case Instruction::AGET_OBJECT:
- case Instruction::AGET_BOOLEAN:
- case Instruction::AGET_BYTE:
- case Instruction::AGET_CHAR:
- case Instruction::AGET_SHORT:
- case Instruction::AGET_WIDE:
- checkArray = mir->ssaRep->uses[0];
- checkIndex = mir->ssaRep->uses[1];
- break;
- break;
- case Instruction::APUT:
- case Instruction::APUT_OBJECT:
- case Instruction::APUT_SHORT:
- case Instruction::APUT_CHAR:
- case Instruction::APUT_BYTE:
- case Instruction::APUT_BOOLEAN:
- checkArray = mir->ssaRep->uses[1];
- checkIndex = mir->ssaRep->uses[2];
- break;
- case Instruction::APUT_WIDE:
- checkArray = mir->ssaRep->uses[2];
- checkIndex = mir->ssaRep->uses[3];
- default:
- break;
- }
- if (checkArray == INVALID_SREG) {
- continue;
- }
- if ((arraySreg == checkArray) && (indexSreg == checkIndex)) {
- if (cUnit->printMe) {
- LOG(INFO) << "Squashing range check @ 0x" << std::hex
- << mir->offset;
- }
- mir->optimizationFlags |= MIR_IGNORE_RANGE_CHECK;
- }
+ while (true) {
+ mir = advanceMIR(cUnit, pBp, mir, NULL, false);
+ if (!mir) {
+ break;
}
+ if ((mir->ssaRep == NULL) ||
+ (mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ continue;
+ }
+ int checkArray = INVALID_SREG;
+ int checkIndex = INVALID_SREG;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::AGET_WIDE:
+ checkArray = mir->ssaRep->uses[0];
+ checkIndex = mir->ssaRep->uses[1];
+ break;
+ case Instruction::APUT:
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ checkArray = mir->ssaRep->uses[1];
+ checkIndex = mir->ssaRep->uses[2];
+ break;
+ case Instruction::APUT_WIDE:
+ checkArray = mir->ssaRep->uses[2];
+ checkIndex = mir->ssaRep->uses[3];
+ default:
+ break;
+ }
+ if (checkArray == INVALID_SREG) {
+ continue;
+ }
+ if ((arraySreg == checkArray) && (indexSreg == checkIndex)) {
+ if (cUnit->printMe) {
+ LOG(INFO) << "Squashing range check @ 0x" << std::hex << mir->offset;
+ }
+ mir->optimizationFlags |= MIR_IGNORE_RANGE_CHECK;
+ }
+ }
}
/* Allocate a compiler temp, return Sreg. Reuse existing if no conflict */
int allocCompilerTempSreg(CompilationUnit* cUnit, ArenaBitVector* bv)
{
- for (int i = 0; i < cUnit->numCompilerTemps; i++) {
- CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
- ArenaBitVector* tBv = ct->bv;
- if (!oatTestBitVectors(bv, tBv)) {
- // Combine live maps and reuse existing temp
- oatUnifyBitVectors(tBv, tBv, bv);
- return ct->sReg;
- }
+ for (int i = 0; i < cUnit->numCompilerTemps; i++) {
+ CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
+ ArenaBitVector* tBv = ct->bv;
+ if (!oatTestBitVectors(bv, tBv)) {
+ // Combine live maps and reuse existing temp
+ oatUnifyBitVectors(tBv, tBv, bv);
+ return ct->sReg;
}
+ }
- // Create a new compiler temp & associated live bitmap
- CompilerTemp* ct = (CompilerTemp*)oatNew(cUnit, sizeof(CompilerTemp),
- true, kAllocMisc);
- ArenaBitVector *nBv = oatAllocBitVector(cUnit, cUnit->numBlocks, true,
- kBitMapMisc);
- oatCopyBitVector(nBv, bv);
- ct->bv = nBv;
- ct->sReg = addNewSReg(cUnit, SSA_CTEMP_BASEREG - cUnit->numCompilerTemps);
- cUnit->numCompilerTemps++;
- oatInsertGrowableList(cUnit, &cUnit->compilerTemps, (intptr_t)ct);
- DCHECK_EQ(cUnit->numCompilerTemps, (int)cUnit->compilerTemps.numUsed);
- return ct->sReg;
+ // Create a new compiler temp & associated live bitmap
+ CompilerTemp* ct = (CompilerTemp*)oatNew(cUnit, sizeof(CompilerTemp),
+ true, kAllocMisc);
+ ArenaBitVector *nBv = oatAllocBitVector(cUnit, cUnit->numBlocks, true,
+ kBitMapMisc);
+ oatCopyBitVector(nBv, bv);
+ ct->bv = nBv;
+ ct->sReg = addNewSReg(cUnit, SSA_CTEMP_BASEREG - cUnit->numCompilerTemps);
+ cUnit->numCompilerTemps++;
+ oatInsertGrowableList(cUnit, &cUnit->compilerTemps, (intptr_t)ct);
+ DCHECK_EQ(cUnit->numCompilerTemps, (int)cUnit->compilerTemps.numUsed);
+ return ct->sReg;
}
/* Creata a new MIR node for a new pseudo op. */
-MIR* rawMIR(CompilationUnit* cUnit, Instruction::Code opcode, int defs, int uses)
+MIR* rawMIR(CompilationUnit* cUnit, Instruction::Code opcode, int defs,
+ int uses)
{
- MIR* res = (MIR*)oatNew( cUnit, sizeof(MIR), true, kAllocMIR);
- res->ssaRep =(struct SSARepresentation *)
- oatNew(cUnit, sizeof(SSARepresentation), true, kAllocDFInfo);
- if (uses) {
- res->ssaRep->numUses = uses;
- res->ssaRep->uses = (int*)oatNew(cUnit, sizeof(int) * uses, false, kAllocDFInfo);
- }
- if (defs) {
- res->ssaRep->numDefs = defs;
- res->ssaRep->defs = (int*)oatNew(cUnit, sizeof(int) * defs, false, kAllocDFInfo);
- res->ssaRep->fpDef = (bool*)oatNew(cUnit, sizeof(bool) * defs, true, kAllocDFInfo);
- }
- res->dalvikInsn.opcode = opcode;
- return res;
+  MIR* res = (MIR*)oatNew(cUnit, sizeof(MIR), true, kAllocMIR);
+  res->ssaRep = (struct SSARepresentation *)
+ oatNew(cUnit, sizeof(SSARepresentation), true, kAllocDFInfo);
+ if (uses) {
+ res->ssaRep->numUses = uses;
+ res->ssaRep->uses = (int*)oatNew(cUnit, sizeof(int) * uses, false,
+ kAllocDFInfo);
+ }
+ if (defs) {
+ res->ssaRep->numDefs = defs;
+ res->ssaRep->defs = (int*)oatNew(cUnit, sizeof(int) * defs, false,
+ kAllocDFInfo);
+ res->ssaRep->fpDef = (bool*)oatNew(cUnit, sizeof(bool) * defs, true,
+ kAllocDFInfo);
+ }
+ res->dalvikInsn.opcode = opcode;
+ return res;
}
/* Do some MIR-level basic block optimizations */
bool basicBlockOpt(CompilationUnit* cUnit, BasicBlock* bb)
{
- int numTemps = 0;
+ int numTemps = 0;
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- // Look for interesting opcodes, skip otherwise
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::AGET:
- case Instruction::AGET_OBJECT:
- case Instruction::AGET_BOOLEAN:
- case Instruction::AGET_BYTE:
- case Instruction::AGET_CHAR:
- case Instruction::AGET_SHORT:
- case Instruction::AGET_WIDE:
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- int arrSreg = mir->ssaRep->uses[0];
- int idxSreg = mir->ssaRep->uses[1];
- BasicBlock* tbb = bb;
- squashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
- }
- break;
- case Instruction::APUT:
- case Instruction::APUT_OBJECT:
- case Instruction::APUT_SHORT:
- case Instruction::APUT_CHAR:
- case Instruction::APUT_BYTE:
- case Instruction::APUT_BOOLEAN:
- case Instruction::APUT_WIDE:
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- int start = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
- int arrSreg = mir->ssaRep->uses[start];
- int idxSreg = mir->ssaRep->uses[start + 1];
- BasicBlock* tbb = bb;
- squashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
- }
- break;
- case Instruction::CMPL_FLOAT:
- case Instruction::CMPL_DOUBLE:
- case Instruction::CMPG_FLOAT:
- case Instruction::CMPG_DOUBLE:
- case Instruction::CMP_LONG:
- if (mir->next != NULL) {
- MIR* mirNext = mir->next;
- Instruction::Code brOpcode = mirNext->dalvikInsn.opcode;
- ConditionCode ccode = kCondNv;
- switch(brOpcode) {
- case Instruction::IF_EQZ:
- ccode = kCondEq;
- break;
- case Instruction::IF_NEZ:
- ccode = kCondNe;
- break;
- case Instruction::IF_LTZ:
- ccode = kCondLt;
- break;
- case Instruction::IF_GEZ:
- ccode = kCondGe;
- break;
- case Instruction::IF_GTZ:
- ccode = kCondGt;
- break;
- case Instruction::IF_LEZ:
- ccode = kCondLe;
- break;
- default:
- break;
- }
- // Make sure result of cmp is used by next insn and nowhere else
- if ((ccode != kCondNv) &&
- (mir->ssaRep->defs[0] == mirNext->ssaRep->uses[0]) &&
- (getSSAUseCount(cUnit, mir->ssaRep->defs[0]) == 1)) {
- mirNext->dalvikInsn.arg[0] = ccode;
- switch(opcode) {
- case Instruction::CMPL_FLOAT:
- mirNext->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
- break;
- case Instruction::CMPL_DOUBLE:
- mirNext->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
- break;
- case Instruction::CMPG_FLOAT:
- mirNext->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
- break;
- case Instruction::CMPG_DOUBLE:
- mirNext->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
- break;
- case Instruction::CMP_LONG:
- mirNext->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpFusedCmpLong);
- break;
- default: LOG(ERROR) << "Unexpected opcode: " << (int)opcode;
- }
- mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- mirNext->ssaRep->numUses = mir->ssaRep->numUses;
- mirNext->ssaRep->uses = mir->ssaRep->uses;
- mirNext->ssaRep->fpUse = mir->ssaRep->fpUse;
- mirNext->ssaRep->numDefs = 0;
- mir->ssaRep->numUses = 0;
- mir->ssaRep->numDefs = 0;
- }
- }
- break;
- default:
- break;
+ for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ // Look for interesting opcodes, skip otherwise
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::AGET_WIDE:
+ if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ int arrSreg = mir->ssaRep->uses[0];
+ int idxSreg = mir->ssaRep->uses[1];
+ BasicBlock* tbb = bb;
+ squashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
}
+ break;
+ case Instruction::APUT:
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::APUT_WIDE:
+ if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ int start = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+ int arrSreg = mir->ssaRep->uses[start];
+ int idxSreg = mir->ssaRep->uses[start + 1];
+ BasicBlock* tbb = bb;
+ squashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
+ }
+ break;
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPG_DOUBLE:
+ case Instruction::CMP_LONG:
+ if (mir->next != NULL) {
+ MIR* mirNext = mir->next;
+ Instruction::Code brOpcode = mirNext->dalvikInsn.opcode;
+ ConditionCode ccode = kCondNv;
+ switch(brOpcode) {
+ case Instruction::IF_EQZ:
+ ccode = kCondEq;
+ break;
+ case Instruction::IF_NEZ:
+ ccode = kCondNe;
+ break;
+ case Instruction::IF_LTZ:
+ ccode = kCondLt;
+ break;
+ case Instruction::IF_GEZ:
+ ccode = kCondGe;
+ break;
+ case Instruction::IF_GTZ:
+ ccode = kCondGt;
+ break;
+ case Instruction::IF_LEZ:
+ ccode = kCondLe;
+ break;
+ default:
+ break;
+ }
+ // Make sure result of cmp is used by next insn and nowhere else
+ if ((ccode != kCondNv) &&
+ (mir->ssaRep->defs[0] == mirNext->ssaRep->uses[0]) &&
+ (getSSAUseCount(cUnit, mir->ssaRep->defs[0]) == 1)) {
+ mirNext->dalvikInsn.arg[0] = ccode;
+ switch(opcode) {
+ case Instruction::CMPL_FLOAT:
+ mirNext->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
+ break;
+ case Instruction::CMPL_DOUBLE:
+ mirNext->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
+ break;
+ case Instruction::CMPG_FLOAT:
+ mirNext->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
+ break;
+ case Instruction::CMPG_DOUBLE:
+ mirNext->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
+ break;
+ case Instruction::CMP_LONG:
+ mirNext->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpLong);
+ break;
+ default: LOG(ERROR) << "Unexpected opcode: " << (int)opcode;
+ }
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ mirNext->ssaRep->numUses = mir->ssaRep->numUses;
+ mirNext->ssaRep->uses = mir->ssaRep->uses;
+ mirNext->ssaRep->fpUse = mir->ssaRep->fpUse;
+ mirNext->ssaRep->numDefs = 0;
+ mir->ssaRep->numUses = 0;
+ mir->ssaRep->numDefs = 0;
+ }
+ }
+ break;
+ default:
+ break;
}
+ }
- if (numTemps > cUnit->numCompilerTemps) {
- cUnit->numCompilerTemps = numTemps;
- }
- return true;
+ if (numTemps > cUnit->numCompilerTemps) {
+ cUnit->numCompilerTemps = numTemps;
+ }
+ return true;
}
bool nullCheckEliminationInit(struct CompilationUnit* cUnit,
struct BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
- bb->dataFlowInfo->endingNullCheckV =
- oatAllocBitVector(cUnit, cUnit->numSSARegs, false, kBitMapNullCheck);
- oatClearAllBits(bb->dataFlowInfo->endingNullCheckV);
- return true;
+ if (bb->dataFlowInfo == NULL) return false;
+ bb->dataFlowInfo->endingNullCheckV =
+ oatAllocBitVector(cUnit, cUnit->numSSARegs, false, kBitMapNullCheck);
+ oatClearAllBits(bb->dataFlowInfo->endingNullCheckV);
+ return true;
}
/* Eliminate unnecessary null checks for a basic block. */
bool eliminateNullChecks( struct CompilationUnit* cUnit, struct BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->dataFlowInfo == NULL) return false;
+
+ /*
+ * Set initial state. Be conservative with catch
+ * blocks and start with no assumptions about null check
+ * status (except for "this").
+ */
+ if ((bb->blockType == kEntryBlock) | bb->catchEntry) {
+ oatClearAllBits(cUnit->tempSSARegisterV);
+ if ((cUnit->access_flags & kAccStatic) == 0) {
+ // If non-static method, mark "this" as non-null
+ int thisReg = cUnit->numDalvikRegisters - cUnit->numIns;
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, thisReg);
+ }
+ } else {
+    // Starting state is intersection of all incoming arcs
+ GrowableListIterator iter;
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+ BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ DCHECK(predBB != NULL);
+ oatCopyBitVector(cUnit->tempSSARegisterV,
+ predBB->dataFlowInfo->endingNullCheckV);
+ while (true) {
+ predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ if (!predBB) break;
+ if ((predBB->dataFlowInfo == NULL) ||
+ (predBB->dataFlowInfo->endingNullCheckV == NULL)) {
+ continue;
+ }
+ oatIntersectBitVectors(cUnit->tempSSARegisterV,
+ cUnit->tempSSARegisterV,
+ predBB->dataFlowInfo->endingNullCheckV);
+ }
+ }
+
+ // Walk through the instruction in the block, updating as necessary
+ for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ if (mir->ssaRep == NULL) {
+ continue;
+ }
+ int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+
+ // Mark target of NEW* as non-null
+ if (dfAttributes & DF_NON_NULL_DST) {
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, mir->ssaRep->defs[0]);
+ }
+
+ // Mark non-null returns from invoke-style NEW*
+ if (dfAttributes & DF_NON_NULL_RET) {
+ MIR* nextMir = mir->next;
+ // Next should be an MOVE_RESULT_OBJECT
+ if (nextMir &&
+ nextMir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+ // Mark as null checked
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, nextMir->ssaRep->defs[0]);
+ } else {
+ if (nextMir) {
+ LOG(WARNING) << "Unexpected opcode following new: "
+ << (int)nextMir->dalvikInsn.opcode;
+ } else if (bb->fallThrough) {
+ // Look in next basic block
+ struct BasicBlock* nextBB = bb->fallThrough;
+ for (MIR* tmir = nextBB->firstMIRInsn; tmir;
+             tmir = tmir->next) {
+ if ((int)tmir->dalvikInsn.opcode >= (int)kMirOpFirst) {
+ continue;
+ }
+ // First non-pseudo should be MOVE_RESULT_OBJECT
+ if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+ // Mark as null checked
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, tmir->ssaRep->defs[0]);
+ } else {
+ LOG(WARNING) << "Unexpected op after new: "
+ << (int)tmir->dalvikInsn.opcode;
+ }
+ break;
+ }
+ }
+ }
+ }
/*
- * Set initial state. Be conservative with catch
- * blocks and start with no assumptions about null check
- * status (except for "this").
+ * Propagate nullcheck state on register copies (including
+ * Phi pseudo copies. For the latter, nullcheck state is
+ * the "and" of all the Phi's operands.
*/
- if ((bb->blockType == kEntryBlock) | bb->catchEntry) {
- oatClearAllBits(cUnit->tempSSARegisterV);
- if ((cUnit->access_flags & kAccStatic) == 0) {
- // If non-static method, mark "this" as non-null
- int thisReg = cUnit->numDalvikRegisters - cUnit->numIns;
- oatSetBit(cUnit, cUnit->tempSSARegisterV, thisReg);
- }
- } else {
- // Starting state is intesection of all incoming arcs
- GrowableListIterator iter;
- oatGrowableListIteratorInit(bb->predecessors, &iter);
- BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- DCHECK(predBB != NULL);
- oatCopyBitVector(cUnit->tempSSARegisterV,
- predBB->dataFlowInfo->endingNullCheckV);
- while (true) {
- predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- if (!predBB) break;
- if ((predBB->dataFlowInfo == NULL) ||
- (predBB->dataFlowInfo->endingNullCheckV == NULL)) {
- continue;
- }
- oatIntersectBitVectors(cUnit->tempSSARegisterV,
- cUnit->tempSSARegisterV,
- predBB->dataFlowInfo->endingNullCheckV);
- }
+ if (dfAttributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
+ int tgtSreg = mir->ssaRep->defs[0];
+ int operands = (dfAttributes & DF_NULL_TRANSFER_0) ? 1 :
+ mir->ssaRep->numUses;
+ bool nullChecked = true;
+ for (int i = 0; i < operands; i++) {
+ nullChecked &= oatIsBitSet(cUnit->tempSSARegisterV,
+ mir->ssaRep->uses[i]);
+ }
+ if (nullChecked) {
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, tgtSreg);
+ }
}
- // Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- if (mir->ssaRep == NULL) {
- continue;
+ // Already nullchecked?
+ if (dfAttributes & DF_HAS_NULL_CHKS) {
+ int srcIdx;
+ if (dfAttributes & DF_NULL_CHK_1) {
+ srcIdx = 1;
+ } else if (dfAttributes & DF_NULL_CHK_2) {
+ srcIdx = 2;
+ } else {
+ srcIdx = 0;
+ }
+ int srcSreg = mir->ssaRep->uses[srcIdx];
+ if (oatIsBitSet(cUnit->tempSSARegisterV, srcSreg)) {
+ // Eliminate the null check
+ mir->optimizationFlags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ // Mark sReg as null-checked
+ oatSetBit(cUnit, cUnit->tempSSARegisterV, srcSreg);
}
- int dfAttributes =
- oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ }
+ }
- // Mark target of NEW* as non-null
- if (dfAttributes & DF_NON_NULL_DST) {
- oatSetBit(cUnit, cUnit->tempSSARegisterV, mir->ssaRep->defs[0]);
- }
-
- // Mark non-null returns from invoke-style NEW*
- if (dfAttributes & DF_NON_NULL_RET) {
- MIR* nextMir = mir->next;
- // Next should be an MOVE_RESULT_OBJECT
- if (nextMir && nextMir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
- // Mark as null checked
- oatSetBit(cUnit, cUnit->tempSSARegisterV,
- nextMir->ssaRep->defs[0]);
- } else {
- if (nextMir) {
- LOG(WARNING) << "Unexpected opcode following new: " <<
- (int)nextMir->dalvikInsn.opcode;
- } else if (bb->fallThrough) {
- // Look in next basic block
- struct BasicBlock* nextBB = bb->fallThrough;
- for (MIR* tmir = nextBB->firstMIRInsn; tmir;
- tmir =tmir->next) {
- if ((int)tmir->dalvikInsn.opcode >= (int)kMirOpFirst) {
- continue;
- }
- // First non-pseudo should be MOVE_RESULT_OBJECT
- if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
- // Mark as null checked
- oatSetBit(cUnit, cUnit->tempSSARegisterV,
- tmir->ssaRep->defs[0]);
- } else {
- LOG(WARNING) << "Unexpected op after new: " <<
- (int)tmir->dalvikInsn.opcode;
- }
- break;
- }
- }
- }
- }
-
- /*
- * Propagate nullcheck state on register copies (including
- * Phi pseudo copies. For the latter, nullcheck state is
- * the "and" of all the Phi's operands.
- */
- if (dfAttributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
- int tgtSreg = mir->ssaRep->defs[0];
- int operands = (dfAttributes & DF_NULL_TRANSFER_0) ? 1 :
- mir->ssaRep->numUses;
- bool nullChecked = true;
- for (int i = 0; i < operands; i++) {
- nullChecked &= oatIsBitSet(cUnit->tempSSARegisterV,
- mir->ssaRep->uses[i]);
- }
- if (nullChecked) {
- oatSetBit(cUnit, cUnit->tempSSARegisterV, tgtSreg);
- }
- }
-
- // Already nullchecked?
- if (dfAttributes & DF_HAS_NULL_CHKS) {
- int srcIdx;
- if (dfAttributes & DF_NULL_CHK_1) {
- srcIdx = 1;
- } else if (dfAttributes & DF_NULL_CHK_2) {
- srcIdx = 2;
- } else {
- srcIdx = 0;
- }
- int srcSreg = mir->ssaRep->uses[srcIdx];
- if (oatIsBitSet(cUnit->tempSSARegisterV, srcSreg)) {
- // Eliminate the null check
- mir->optimizationFlags |= MIR_IGNORE_NULL_CHECK;
- } else {
- // Mark sReg as null-checked
- oatSetBit(cUnit, cUnit->tempSSARegisterV, srcSreg);
- }
- }
- }
-
- // Did anything change?
- bool res = oatCompareBitVectors(bb->dataFlowInfo->endingNullCheckV,
- cUnit->tempSSARegisterV);
- if (res) {
- oatCopyBitVector(bb->dataFlowInfo->endingNullCheckV,
- cUnit->tempSSARegisterV);
- }
- return res;
+ // Did anything change?
+ bool res = oatCompareBitVectors(bb->dataFlowInfo->endingNullCheckV,
+ cUnit->tempSSARegisterV);
+ if (res) {
+ oatCopyBitVector(bb->dataFlowInfo->endingNullCheckV,
+ cUnit->tempSSARegisterV);
+ }
+ return res;
}
void oatMethodNullCheckElimination(CompilationUnit *cUnit)
{
- if (!(cUnit->disableOpt & (1 << kNullCheckElimination))) {
- DCHECK(cUnit->tempSSARegisterV != NULL);
- oatDataFlowAnalysisDispatcher(cUnit, nullCheckEliminationInit,
- kAllNodes,
- false /* isIterative */);
- oatDataFlowAnalysisDispatcher(cUnit, eliminateNullChecks,
- kPreOrderDFSTraversal,
- true /* isIterative */);
- }
+ if (!(cUnit->disableOpt & (1 << kNullCheckElimination))) {
+ DCHECK(cUnit->tempSSARegisterV != NULL);
+ oatDataFlowAnalysisDispatcher(cUnit, nullCheckEliminationInit, kAllNodes,
+ false /* isIterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, eliminateNullChecks,
+ kPreOrderDFSTraversal,
+ true /* isIterative */);
+ }
}
void oatMethodBasicBlockOptimization(CompilationUnit *cUnit)
{
+ if (!(cUnit->disableOpt & (1 << kBBOpt))) {
+ oatInitGrowableList(cUnit, &cUnit->compilerTemps, 6, kListMisc);
+ DCHECK_EQ(cUnit->numCompilerTemps, 0);
if (!(cUnit->disableOpt & (1 << kBBOpt))) {
- oatInitGrowableList(cUnit, &cUnit->compilerTemps, 6, kListMisc);
- DCHECK_EQ(cUnit->numCompilerTemps, 0);
- if (!(cUnit->disableOpt & (1 << kBBOpt))) {
- oatDataFlowAnalysisDispatcher(cUnit, basicBlockOpt,
- kAllNodes, false /* isIterative */);
- }
+ oatDataFlowAnalysisDispatcher(cUnit, basicBlockOpt,
+ kAllNodes, false /* isIterative */);
}
+ }
}
void addLoopHeader(CompilationUnit* cUnit, BasicBlock* header,
- BasicBlock* backEdge)
+ BasicBlock* backEdge)
{
- GrowableListIterator iter;
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
- for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
- (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
- if (loop->header == header) {
- oatInsertGrowableList(cUnit, &loop->incomingBackEdges,
- (intptr_t)backEdge);
- return;
- }
+ GrowableListIterator iter;
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
+ (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
+ if (loop->header == header) {
+ oatInsertGrowableList(cUnit, &loop->incomingBackEdges,
+ (intptr_t)backEdge);
+ return;
}
- LoopInfo* info = (LoopInfo*)oatNew(cUnit, sizeof(LoopInfo), true,
- kAllocDFInfo);
- info->header = header;
- oatInitGrowableList(cUnit, &info->incomingBackEdges, 2, kListMisc);
- oatInsertGrowableList(cUnit, &info->incomingBackEdges, (intptr_t)backEdge);
- oatInsertGrowableList(cUnit, &cUnit->loopHeaders, (intptr_t)info);
+ }
+ LoopInfo* info = (LoopInfo*)oatNew(cUnit, sizeof(LoopInfo), true,
+ kAllocDFInfo);
+ info->header = header;
+ oatInitGrowableList(cUnit, &info->incomingBackEdges, 2, kListMisc);
+ oatInsertGrowableList(cUnit, &info->incomingBackEdges, (intptr_t)backEdge);
+ oatInsertGrowableList(cUnit, &cUnit->loopHeaders, (intptr_t)info);
}
bool findBackEdges(struct CompilationUnit* cUnit, struct BasicBlock* bb)
{
- if ((bb->dataFlowInfo == NULL) || (bb->lastMIRInsn == NULL)) {
- return false;
- }
- Instruction::Code opcode = bb->lastMIRInsn->dalvikInsn.opcode;
- if (Instruction::Flags(opcode) & Instruction::kBranch) {
- if (bb->taken && (bb->taken->startOffset <= bb->startOffset)) {
- DCHECK(bb->dominators != NULL);
- if (oatIsBitSet(bb->dominators, bb->taken->id)) {
- if (cUnit->printMe) {
- LOG(INFO) << "Loop backedge from 0x"
- << std::hex << bb->lastMIRInsn->offset
- << " to 0x" << std::hex << bb->taken->startOffset;
- }
- addLoopHeader(cUnit, bb->taken, bb);
- }
- }
- }
+ if ((bb->dataFlowInfo == NULL) || (bb->lastMIRInsn == NULL)) {
return false;
+ }
+ Instruction::Code opcode = bb->lastMIRInsn->dalvikInsn.opcode;
+ if (Instruction::Flags(opcode) & Instruction::kBranch) {
+ if (bb->taken && (bb->taken->startOffset <= bb->startOffset)) {
+ DCHECK(bb->dominators != NULL);
+ if (oatIsBitSet(bb->dominators, bb->taken->id)) {
+ if (cUnit->printMe) {
+ LOG(INFO) << "Loop backedge from 0x"
+ << std::hex << bb->lastMIRInsn->offset
+ << " to 0x" << std::hex << bb->taken->startOffset;
+ }
+ addLoopHeader(cUnit, bb->taken, bb);
+ }
+ }
+ }
+ return false;
}
void addBlocksToLoop(CompilationUnit* cUnit, ArenaBitVector* blocks,
- BasicBlock* bb, int headId)
+ BasicBlock* bb, int headId)
{
- if (!oatIsBitSet(bb->dominators, headId) ||
- oatIsBitSet(blocks, bb->id)) {
- return;
- }
- oatSetBit(cUnit, blocks, bb->id);
- GrowableListIterator iter;
- oatGrowableListIteratorInit(bb->predecessors, &iter);
- BasicBlock* predBB;
- for (predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); predBB;
- predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
- addBlocksToLoop(cUnit, blocks, predBB, headId);
- }
+ if (!oatIsBitSet(bb->dominators, headId) ||
+ oatIsBitSet(blocks, bb->id)) {
+ return;
+ }
+ oatSetBit(cUnit, blocks, bb->id);
+ GrowableListIterator iter;
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+ BasicBlock* predBB;
+ for (predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); predBB;
+ predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
+ addBlocksToLoop(cUnit, blocks, predBB, headId);
+ }
}
void oatDumpLoops(CompilationUnit *cUnit)
{
+ GrowableListIterator iter;
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
+ (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
+ LOG(INFO) << "Loop head block id " << loop->header->id
+ << ", offset 0x" << std::hex << loop->header->startOffset
+ << ", Depth: " << loop->header->nestingDepth;
GrowableListIterator iter;
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
- for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
- (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
- LOG(INFO) << "Loop head block id " << loop->header->id
- << ", offset 0x" << std::hex << loop->header->startOffset
- << ", Depth: " << loop->header->nestingDepth;
- GrowableListIterator iter;
- oatGrowableListIteratorInit(&loop->incomingBackEdges, &iter);
- BasicBlock* edgeBB;
- for (edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); edgeBB;
- edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
- LOG(INFO) << " Backedge block id " << edgeBB->id
- << ", offset 0x" << std::hex << edgeBB->startOffset;
- ArenaBitVectorIterator bIter;
- oatBitVectorIteratorInit(loop->blocks, &bIter);
- for (int bbId = oatBitVectorIteratorNext(&bIter); bbId != -1;
- bbId = oatBitVectorIteratorNext(&bIter)) {
- BasicBlock *bb;
- bb = (BasicBlock*)
- oatGrowableListGetElement(&cUnit->blockList, bbId);
- LOG(INFO) << " (" << bb->id << ", 0x" << std::hex
- << bb->startOffset << ")";
- }
- }
+ oatGrowableListIteratorInit(&loop->incomingBackEdges, &iter);
+ BasicBlock* edgeBB;
+ for (edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); edgeBB;
+ edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
+ LOG(INFO) << " Backedge block id " << edgeBB->id
+ << ", offset 0x" << std::hex << edgeBB->startOffset;
+ ArenaBitVectorIterator bIter;
+ oatBitVectorIteratorInit(loop->blocks, &bIter);
+ for (int bbId = oatBitVectorIteratorNext(&bIter); bbId != -1;
+ bbId = oatBitVectorIteratorNext(&bIter)) {
+ BasicBlock *bb;
+ bb = (BasicBlock*)
+ oatGrowableListGetElement(&cUnit->blockList, bbId);
+ LOG(INFO) << " (" << bb->id << ", 0x" << std::hex
+ << bb->startOffset << ")";
+ }
}
+ }
}
void oatMethodLoopDetection(CompilationUnit *cUnit)
{
- if (cUnit->disableOpt & (1 << kPromoteRegs)) {
- return;
- }
- oatInitGrowableList(cUnit, &cUnit->loopHeaders, 6, kListMisc);
- // Find the loop headers
- oatDataFlowAnalysisDispatcher(cUnit, findBackEdges,
- kAllNodes, false /* isIterative */);
+ if (cUnit->disableOpt & (1 << kPromoteRegs)) {
+ return;
+ }
+ oatInitGrowableList(cUnit, &cUnit->loopHeaders, 6, kListMisc);
+ // Find the loop headers
+ oatDataFlowAnalysisDispatcher(cUnit, findBackEdges,
+ kAllNodes, false /* isIterative */);
+ GrowableListIterator iter;
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ // Add blocks to each header
+ for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
+ loop; loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
+ loop->blocks = oatAllocBitVector(cUnit, cUnit->numBlocks, true,
+ kBitMapMisc);
+ oatSetBit(cUnit, loop->blocks, loop->header->id);
GrowableListIterator iter;
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
- // Add blocks to each header
- for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
- loop; loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
- loop->blocks = oatAllocBitVector(cUnit, cUnit->numBlocks, true,
- kBitMapMisc);
- oatSetBit(cUnit, loop->blocks, loop->header->id);
- GrowableListIterator iter;
- oatGrowableListIteratorInit(&loop->incomingBackEdges, &iter);
- BasicBlock* edgeBB;
- for (edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); edgeBB;
- edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
- addBlocksToLoop(cUnit, loop->blocks, edgeBB, loop->header->id);
- }
+ oatGrowableListIteratorInit(&loop->incomingBackEdges, &iter);
+ BasicBlock* edgeBB;
+ for (edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter); edgeBB;
+ edgeBB = (BasicBlock*)oatGrowableListIteratorNext(&iter)) {
+ addBlocksToLoop(cUnit, loop->blocks, edgeBB, loop->header->id);
}
- // Compute the nesting depth of each header
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
- for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
- loop; loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
- GrowableListIterator iter2;
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter2);
- LoopInfo* loop2;
- for (loop2 = (LoopInfo*)oatGrowableListIteratorNext(&iter2);
- loop2; loop2 = (LoopInfo*)oatGrowableListIteratorNext(&iter2)) {
- if (oatIsBitSet(loop2->blocks, loop->header->id)) {
- loop->header->nestingDepth++;
- }
- }
+ }
+ // Compute the nesting depth of each header
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
+ loop; loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
+ GrowableListIterator iter2;
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter2);
+ LoopInfo* loop2;
+ for (loop2 = (LoopInfo*)oatGrowableListIteratorNext(&iter2);
+ loop2; loop2 = (LoopInfo*)oatGrowableListIteratorNext(&iter2)) {
+ if (oatIsBitSet(loop2->blocks, loop->header->id)) {
+ loop->header->nestingDepth++;
+ }
}
- // Assign nesting depth to each block in all loops
- oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
- for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
- (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
- ArenaBitVectorIterator bIter;
- oatBitVectorIteratorInit(loop->blocks, &bIter);
- for (int bbId = oatBitVectorIteratorNext(&bIter); bbId != -1;
- bbId = oatBitVectorIteratorNext(&bIter)) {
- BasicBlock *bb;
- bb = (BasicBlock*) oatGrowableListGetElement(&cUnit->blockList,
- bbId);
- bb->nestingDepth = std::max(bb->nestingDepth,
- loop->header->nestingDepth);
- }
+ }
+ // Assign nesting depth to each block in all loops
+ oatGrowableListIteratorInit(&cUnit->loopHeaders, &iter);
+ for (LoopInfo* loop = (LoopInfo*)oatGrowableListIteratorNext(&iter);
+ (loop != NULL); loop = (LoopInfo*)oatGrowableListIteratorNext(&iter)) {
+ ArenaBitVectorIterator bIter;
+ oatBitVectorIteratorInit(loop->blocks, &bIter);
+ for (int bbId = oatBitVectorIteratorNext(&bIter); bbId != -1;
+ bbId = oatBitVectorIteratorNext(&bIter)) {
+ BasicBlock *bb;
+ bb = (BasicBlock*) oatGrowableListGetElement(&cUnit->blockList, bbId);
+ bb->nestingDepth = std::max(bb->nestingDepth,
+ loop->header->nestingDepth);
}
- if (cUnit->printMe) {
- oatDumpLoops(cUnit);
- }
+ }
+ if (cUnit->printMe) {
+ oatDumpLoops(cUnit);
+ }
}
/*
@@ -2263,48 +2246,48 @@
*/
bool invokeUsesMethodStar(CompilationUnit* cUnit, MIR* mir)
{
- InvokeType type;
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_STATIC_RANGE:
- type = kStatic;
- break;
- case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_DIRECT_RANGE:
- type = kDirect;
- break;
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- type = kVirtual;
- break;
- case Instruction::INVOKE_INTERFACE:
- case Instruction::INVOKE_INTERFACE_RANGE:
- return false;
- case Instruction::INVOKE_SUPER_RANGE:
- case Instruction::INVOKE_SUPER:
- type = kSuper;
- break;
- default:
- LOG(WARNING) << "Unexpected invoke op: " << (int)opcode;
- return false;
- }
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
- // TODO: add a flag so we don't counts the stats for this twice
- uint32_t dexMethodIdx = mir->dalvikInsn.vB;
- int vtableIdx;
- uintptr_t directCode;
- uintptr_t directMethod;
- bool fastPath =
- cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, type,
- vtableIdx, directCode,
- directMethod) &&
- !SLOW_INVOKE_PATH;
- return (((type == kDirect) || (type == kStatic)) &&
- fastPath && ((directCode == 0) || (directMethod == 0)));
+ InvokeType type;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ type = kStatic;
+ break;
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ type = kDirect;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ type = kVirtual;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ return false;
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_SUPER:
+ type = kSuper;
+ break;
+ default:
+ LOG(WARNING) << "Unexpected invoke op: " << (int)opcode;
+ return false;
+ }
+ OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
+ *cUnit->dex_file, *cUnit->dex_cache,
+ cUnit->code_item, cUnit->method_idx,
+ cUnit->access_flags);
+  // TODO: add a flag so we don't count the stats for this twice
+ uint32_t dexMethodIdx = mir->dalvikInsn.vB;
+ int vtableIdx;
+ uintptr_t directCode;
+ uintptr_t directMethod;
+ bool fastPath =
+ cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, type,
+ vtableIdx, directCode,
+ directMethod) &&
+ !SLOW_INVOKE_PATH;
+ return (((type == kDirect) || (type == kStatic)) &&
+ fastPath && ((directCode == 0) || (directMethod == 0)));
}
/*
@@ -2314,61 +2297,61 @@
*/
bool countUses(struct CompilationUnit* cUnit, struct BasicBlock* bb)
{
- if (bb->blockType != kDalvikByteCode) {
- return false;
- }
- for (MIR* mir = bb->firstMIRInsn; (mir != NULL); mir = mir->next) {
- if (mir->ssaRep == NULL) {
- continue;
- }
- uint32_t weight = std::min(16U, (uint32_t)bb->nestingDepth);
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
- int sReg = mir->ssaRep->uses[i];
- DCHECK_LT(sReg, (int)cUnit->useCounts.numUsed);
- cUnit->rawUseCounts.elemList[sReg]++;
- cUnit->useCounts.elemList[sReg] += (1 << weight);
- }
- if (!(cUnit->disableOpt & (1 << kPromoteCompilerTemps))) {
- int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- // Implicit use of Method* ? */
- if (dfAttributes & DF_UMS) {
- /*
- * Some invokes will not use Method* - need to perform test similar
- * to that found in genInvoke() to decide whether to count refs
- * for Method* on invoke-class opcodes.
- * TODO: refactor for common test here, save results for genInvoke
- */
- int usesMethodStar = true;
- if ((dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
- !(dfAttributes & DF_NON_NULL_RET)) {
- usesMethodStar &= invokeUsesMethodStar(cUnit, mir);
- }
- if (usesMethodStar) {
- cUnit->rawUseCounts.elemList[cUnit->methodSReg]++;
- cUnit->useCounts.elemList[cUnit->methodSReg] += (1 << weight);
- }
- }
- }
- }
+ if (bb->blockType != kDalvikByteCode) {
return false;
+ }
+ for (MIR* mir = bb->firstMIRInsn; (mir != NULL); mir = mir->next) {
+ if (mir->ssaRep == NULL) {
+ continue;
+ }
+ uint32_t weight = std::min(16U, (uint32_t)bb->nestingDepth);
+ for (int i = 0; i < mir->ssaRep->numUses; i++) {
+ int sReg = mir->ssaRep->uses[i];
+ DCHECK_LT(sReg, (int)cUnit->useCounts.numUsed);
+ cUnit->rawUseCounts.elemList[sReg]++;
+ cUnit->useCounts.elemList[sReg] += (1 << weight);
+ }
+ if (!(cUnit->disableOpt & (1 << kPromoteCompilerTemps))) {
+ int dfAttributes = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+      // Implicit use of Method*?
+ if (dfAttributes & DF_UMS) {
+ /*
+ * Some invokes will not use Method* - need to perform test similar
+ * to that found in genInvoke() to decide whether to count refs
+ * for Method* on invoke-class opcodes.
+ * TODO: refactor for common test here, save results for genInvoke
+ */
+ int usesMethodStar = true;
+ if ((dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
+ !(dfAttributes & DF_NON_NULL_RET)) {
+ usesMethodStar &= invokeUsesMethodStar(cUnit, mir);
+ }
+ if (usesMethodStar) {
+ cUnit->rawUseCounts.elemList[cUnit->methodSReg]++;
+ cUnit->useCounts.elemList[cUnit->methodSReg] += (1 << weight);
+ }
+ }
+ }
+ }
+ return false;
}
void oatMethodUseCount(CompilationUnit *cUnit)
{
- oatInitGrowableList(cUnit, &cUnit->useCounts, cUnit->numSSARegs + 32,
- kListMisc);
- oatInitGrowableList(cUnit, &cUnit->rawUseCounts, cUnit->numSSARegs + 32,
- kListMisc);
- // Initialize list
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- oatInsertGrowableList(cUnit, &cUnit->useCounts, 0);
- oatInsertGrowableList(cUnit, &cUnit->rawUseCounts, 0);
- }
- if (cUnit->disableOpt & (1 << kPromoteRegs)) {
- return;
- }
- oatDataFlowAnalysisDispatcher(cUnit, countUses,
- kAllNodes, false /* isIterative */);
+ oatInitGrowableList(cUnit, &cUnit->useCounts, cUnit->numSSARegs + 32,
+ kListMisc);
+ oatInitGrowableList(cUnit, &cUnit->rawUseCounts, cUnit->numSSARegs + 32,
+ kListMisc);
+ // Initialize list
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ oatInsertGrowableList(cUnit, &cUnit->useCounts, 0);
+ oatInsertGrowableList(cUnit, &cUnit->rawUseCounts, 0);
+ }
+ if (cUnit->disableOpt & (1 << kPromoteRegs)) {
+ return;
+ }
+ oatDataFlowAnalysisDispatcher(cUnit, countUses,
+ kAllNodes, false /* isIterative */);
}
} // namespace art
diff --git a/src/compiler/Dataflow.h b/src/compiler/Dataflow.h
index a51173f..941b544 100644
--- a/src/compiler/Dataflow.h
+++ b/src/compiler/Dataflow.h
@@ -23,38 +23,38 @@
namespace art {
enum DataFlowAttributePos {
- kUA = 0,
- kUB,
- kUC,
- kUAWide,
- kUBWide,
- kUCWide,
- kDA,
- kDAWide,
- kIsMove,
- kIsLinear,
- kSetsConst,
- kFormat35c,
- kFormat3rc,
- kPhi,
- kNullCheckSrc0, // Null check of uses[0]
- kNullCheckSrc1, // Null check of uses[1]
- kNullCheckSrc2, // Null check of uses[2]
- kNullCheckOut0, // Null check out outgoing arg0
- kDstNonNull, // May assume dst is non-null
- kRetNonNull, // May assume retval is non-null
- kNullTransferSrc0, // Object copy src[0] -> dst
- kNullTransferSrcN, // Phi null check state transfer
- kRangeCheckSrc1, // Range check of uses[1]
- kRangeCheckSrc2, // Range check of uses[2]
- kRangeCheckSrc3, // Range check of uses[3]
- kFPA,
- kFPB,
- kFPC,
- kCoreA,
- kCoreB,
- kCoreC,
- kUsesMethodStar, // Implicit use of Method*
+ kUA = 0,
+ kUB,
+ kUC,
+ kUAWide,
+ kUBWide,
+ kUCWide,
+ kDA,
+ kDAWide,
+ kIsMove,
+ kIsLinear,
+ kSetsConst,
+ kFormat35c,
+ kFormat3rc,
+ kPhi,
+ kNullCheckSrc0, // Null check of uses[0]
+ kNullCheckSrc1, // Null check of uses[1]
+ kNullCheckSrc2, // Null check of uses[2]
+ kNullCheckOut0, // Null check out outgoing arg0
+ kDstNonNull, // May assume dst is non-null
+ kRetNonNull, // May assume retval is non-null
+ kNullTransferSrc0, // Object copy src[0] -> dst
+ kNullTransferSrcN, // Phi null check state transfer
+ kRangeCheckSrc1, // Range check of uses[1]
+ kRangeCheckSrc2, // Range check of uses[2]
+ kRangeCheckSrc3, // Range check of uses[3]
+ kFPA,
+ kFPB,
+ kFPC,
+ kCoreA,
+ kCoreB,
+ kCoreC,
+ kUsesMethodStar, // Implicit use of Method*
};
#define DF_NOP 0
@@ -114,21 +114,21 @@
extern const int oatDataFlowAttributes[kMirOpLast];
struct BasicBlockDataFlow {
- ArenaBitVector* useV;
- ArenaBitVector* defV;
- ArenaBitVector* liveInV;
- ArenaBitVector* phiV;
- int* vRegToSSAMap;
- ArenaBitVector* endingNullCheckV;
+ ArenaBitVector* useV;
+ ArenaBitVector* defV;
+ ArenaBitVector* liveInV;
+ ArenaBitVector* phiV;
+ int* vRegToSSAMap;
+ ArenaBitVector* endingNullCheckV;
};
struct SSARepresentation {
- int numUses;
- int* uses;
- bool* fpUse;
- int numDefs;
- int* defs;
- bool* fpDef;
+ int numUses;
+ int* uses;
+ bool* fpUse;
+ int numDefs;
+ int* defs;
+ bool* fpDef;
};
/*
@@ -136,24 +136,24 @@
* induction variable.
*/
struct InductionVariableInfo {
- int ssaReg;
- int basicSSAReg;
- int m; // multiplier
- int c; // constant
- int inc; // loop increment
+ int ssaReg;
+ int basicSSAReg;
+ int m; // multiplier
+ int c; // constant
+ int inc; // loop increment
};
struct ArrayAccessInfo {
- int arrayReg;
- int ivReg;
- int maxC; // For DIV - will affect upper bound checking
- int minC; // For DIV - will affect lower bound checking
+ int arrayReg;
+ int ivReg;
+ int maxC; // For DIV - will affect upper bound checking
+ int minC; // For DIV - will affect lower bound checking
};
struct LoopInfo {
- BasicBlock* header;
- GrowableList incomingBackEdges;
- ArenaBitVector* blocks;
+ BasicBlock* header;
+ GrowableList incomingBackEdges;
+ ArenaBitVector* blocks;
};
void oatMethodLoopDetection(CompilationUnit*);
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index dbaf323..dce9b8b 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -25,51 +25,51 @@
/* Default optimizer/debug setting for the compiler. */
static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
- //(1 << kLoadStoreElimination) |
- //(1 << kLoadHoisting) |
- //(1 << kSuppressLoads) |
- //(1 << kNullCheckElimination) |
- //(1 << kPromoteRegs) |
- //(1 << kTrackLiveTemps) |
- //(1 << kSkipLargeMethodOptimization) |
- //(1 << kSafeOptimizations) |
- //(1 << kBBOpt) |
- //(1 << kMatch) |
- //(1 << kPromoteCompilerTemps) |
- 0;
+ //(1 << kLoadStoreElimination) |
+ //(1 << kLoadHoisting) |
+ //(1 << kSuppressLoads) |
+ //(1 << kNullCheckElimination) |
+ //(1 << kPromoteRegs) |
+ //(1 << kTrackLiveTemps) |
+ //(1 << kSkipLargeMethodOptimization) |
+ //(1 << kSafeOptimizations) |
+ //(1 << kBBOpt) |
+ //(1 << kMatch) |
+ //(1 << kPromoteCompilerTemps) |
+ 0;
static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
- //(1 << kDebugDisplayMissingTargets) |
- //(1 << kDebugVerbose) |
- //(1 << kDebugDumpCFG) |
- //(1 << kDebugSlowFieldPath) |
- //(1 << kDebugSlowInvokePath) |
- //(1 << kDebugSlowStringPath) |
- //(1 << kDebugSlowestFieldPath) |
- //(1 << kDebugSlowestStringPath) |
- //(1 << kDebugExerciseResolveMethod) |
- //(1 << kDebugVerifyDataflow) |
- //(1 << kDebugShowMemoryUsage) |
- //(1 << kDebugShowNops) |
- //(1 << kDebugCountOpcodes) |
- 0;
+ //(1 << kDebugDisplayMissingTargets) |
+ //(1 << kDebugVerbose) |
+ //(1 << kDebugDumpCFG) |
+ //(1 << kDebugSlowFieldPath) |
+ //(1 << kDebugSlowInvokePath) |
+ //(1 << kDebugSlowStringPath) |
+ //(1 << kDebugSlowestFieldPath) |
+ //(1 << kDebugSlowestStringPath) |
+ //(1 << kDebugExerciseResolveMethod) |
+ //(1 << kDebugVerifyDataflow) |
+ //(1 << kDebugShowMemoryUsage) |
+ //(1 << kDebugShowNops) |
+ //(1 << kDebugCountOpcodes) |
+ 0;
inline bool contentIsInsn(const u2* codePtr) {
- u2 instr = *codePtr;
- Instruction::Code opcode = (Instruction::Code)(instr & 0xff);
+ u2 instr = *codePtr;
+ Instruction::Code opcode = (Instruction::Code)(instr & 0xff);
- /*
- * Since the low 8-bit in metadata may look like NOP, we need to check
- * both the low and whole sub-word to determine whether it is code or data.
- */
- return (opcode != Instruction::NOP || instr == 0);
+ /*
+   * Since the low 8 bits in metadata may look like NOP, we need to check
+ * both the low and whole sub-word to determine whether it is code or data.
+ */
+ return (opcode != Instruction::NOP || instr == 0);
}
/*
* Parse an instruction, return the length of the instruction
*/
inline int parseInsn(CompilationUnit* cUnit, const u2* codePtr,
- DecodedInstruction* decoded_instruction, bool printMe)
+ DecodedInstruction* decoded_instruction, bool printMe)
{
// Don't parse instruction data
if (!contentIsInsn(codePtr)) {
@@ -80,8 +80,10 @@
*decoded_instruction = DecodedInstruction(instruction);
if (printMe) {
- char* decodedString = oatGetDalvikDisassembly(cUnit, *decoded_instruction, NULL);
- LOG(INFO) << codePtr << ": 0x" << std::hex << static_cast<int>(decoded_instruction->opcode)
+ char* decodedString = oatGetDalvikDisassembly(cUnit, *decoded_instruction,
+ NULL);
+ LOG(INFO) << codePtr << ": 0x"
+ << std::hex << static_cast<int>(decoded_instruction->opcode)
<< " " << decodedString;
}
return instruction->SizeInCodeUnits();
@@ -97,7 +99,7 @@
return true;
default:
return false;
- }
+ }
}
/*
@@ -110,90 +112,89 @@
case Instruction::RETURN_WIDE:
case Instruction::RETURN_OBJECT:
return true;
- default:
- return isGoto(insn);
+ default:
+ return isGoto(insn);
}
}
/* Split an existing block from the specified code offset into two */
BasicBlock *splitBlock(CompilationUnit* cUnit, unsigned int codeOffset,
- BasicBlock* origBlock, BasicBlock** immedPredBlockP)
+ BasicBlock* origBlock, BasicBlock** immedPredBlockP)
{
- MIR* insn = origBlock->firstMIRInsn;
- while (insn) {
- if (insn->offset == codeOffset) break;
- insn = insn->next;
- }
- if (insn == NULL) {
- LOG(FATAL) << "Break split failed";
- }
- BasicBlock *bottomBlock = oatNewBB(cUnit, kDalvikByteCode,
- cUnit->numBlocks++);
- oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) bottomBlock);
+ MIR* insn = origBlock->firstMIRInsn;
+ while (insn) {
+ if (insn->offset == codeOffset) break;
+ insn = insn->next;
+ }
+ if (insn == NULL) {
+ LOG(FATAL) << "Break split failed";
+ }
+ BasicBlock *bottomBlock = oatNewBB(cUnit, kDalvikByteCode,
+ cUnit->numBlocks++);
+ oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) bottomBlock);
- bottomBlock->startOffset = codeOffset;
- bottomBlock->firstMIRInsn = insn;
- bottomBlock->lastMIRInsn = origBlock->lastMIRInsn;
+ bottomBlock->startOffset = codeOffset;
+ bottomBlock->firstMIRInsn = insn;
+ bottomBlock->lastMIRInsn = origBlock->lastMIRInsn;
- /* Add it to the quick lookup cache */
- cUnit->blockMap.Put(bottomBlock->startOffset, bottomBlock);
+ /* Add it to the quick lookup cache */
+ cUnit->blockMap.Put(bottomBlock->startOffset, bottomBlock);
- /* Handle the taken path */
- bottomBlock->taken = origBlock->taken;
- if (bottomBlock->taken) {
- origBlock->taken = NULL;
- oatDeleteGrowableList(bottomBlock->taken->predecessors,
- (intptr_t)origBlock);
- oatInsertGrowableList(cUnit, bottomBlock->taken->predecessors,
- (intptr_t)bottomBlock);
- }
-
- /* Handle the fallthrough path */
- bottomBlock->needFallThroughBranch = origBlock->needFallThroughBranch;
- bottomBlock->fallThrough = origBlock->fallThrough;
- origBlock->fallThrough = bottomBlock;
- origBlock->needFallThroughBranch = true;
- oatInsertGrowableList(cUnit, bottomBlock->predecessors,
+ /* Handle the taken path */
+ bottomBlock->taken = origBlock->taken;
+ if (bottomBlock->taken) {
+ origBlock->taken = NULL;
+ oatDeleteGrowableList(bottomBlock->taken->predecessors,
(intptr_t)origBlock);
- if (bottomBlock->fallThrough) {
- oatDeleteGrowableList(bottomBlock->fallThrough->predecessors,
- (intptr_t)origBlock);
- oatInsertGrowableList(cUnit, bottomBlock->fallThrough->predecessors,
- (intptr_t)bottomBlock);
+ oatInsertGrowableList(cUnit, bottomBlock->taken->predecessors,
+ (intptr_t)bottomBlock);
+ }
+
+ /* Handle the fallthrough path */
+ bottomBlock->needFallThroughBranch = origBlock->needFallThroughBranch;
+ bottomBlock->fallThrough = origBlock->fallThrough;
+ origBlock->fallThrough = bottomBlock;
+ origBlock->needFallThroughBranch = true;
+ oatInsertGrowableList(cUnit, bottomBlock->predecessors,
+ (intptr_t)origBlock);
+ if (bottomBlock->fallThrough) {
+ oatDeleteGrowableList(bottomBlock->fallThrough->predecessors,
+ (intptr_t)origBlock);
+ oatInsertGrowableList(cUnit, bottomBlock->fallThrough->predecessors,
+ (intptr_t)bottomBlock);
+ }
+
+ /* Handle the successor list */
+ if (origBlock->successorBlockList.blockListType != kNotUsed) {
+ bottomBlock->successorBlockList = origBlock->successorBlockList;
+ origBlock->successorBlockList.blockListType = kNotUsed;
+ GrowableListIterator iterator;
+
+ oatGrowableListIteratorInit(&bottomBlock->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock *bb = successorBlockInfo->block;
+ oatDeleteGrowableList(bb->predecessors, (intptr_t)origBlock);
+ oatInsertGrowableList(cUnit, bb->predecessors, (intptr_t)bottomBlock);
}
+ }
- /* Handle the successor list */
- if (origBlock->successorBlockList.blockListType != kNotUsed) {
- bottomBlock->successorBlockList = origBlock->successorBlockList;
- origBlock->successorBlockList.blockListType = kNotUsed;
- GrowableListIterator iterator;
+ origBlock->lastMIRInsn = insn->prev;
- oatGrowableListIteratorInit(&bottomBlock->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock *bb = successorBlockInfo->block;
- oatDeleteGrowableList(bb->predecessors, (intptr_t)origBlock);
- oatInsertGrowableList(cUnit, bb->predecessors,
- (intptr_t)bottomBlock);
- }
- }
-
- origBlock->lastMIRInsn = insn->prev;
-
- insn->prev->next = NULL;
- insn->prev = NULL;
- /*
- * Update the immediate predecessor block pointer so that outgoing edges
- * can be applied to the proper block.
- */
- if (immedPredBlockP) {
- DCHECK_EQ(*immedPredBlockP, origBlock);
- *immedPredBlockP = bottomBlock;
- }
- return bottomBlock;
+ insn->prev->next = NULL;
+ insn->prev = NULL;
+ /*
+ * Update the immediate predecessor block pointer so that outgoing edges
+ * can be applied to the proper block.
+ */
+ if (immedPredBlockP) {
+ DCHECK_EQ(*immedPredBlockP, origBlock);
+ *immedPredBlockP = bottomBlock;
+ }
+ return bottomBlock;
}
/*
@@ -207,444 +208,441 @@
BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
bool split, bool create, BasicBlock** immedPredBlockP)
{
- GrowableList* blockList = &cUnit->blockList;
- BasicBlock* bb;
- unsigned int i;
- SafeMap<unsigned int, BasicBlock*>::iterator it;
+ GrowableList* blockList = &cUnit->blockList;
+ BasicBlock* bb;
+ unsigned int i;
+ SafeMap<unsigned int, BasicBlock*>::iterator it;
- it = cUnit->blockMap.find(codeOffset);
- if (it != cUnit->blockMap.end()) {
- return it->second;
- } else if (!create) {
- return NULL;
+ it = cUnit->blockMap.find(codeOffset);
+ if (it != cUnit->blockMap.end()) {
+ return it->second;
+ } else if (!create) {
+ return NULL;
+ }
+
+ if (split) {
+ for (i = 0; i < blockList->numUsed; i++) {
+ bb = (BasicBlock *) blockList->elemList[i];
+ if (bb->blockType != kDalvikByteCode) continue;
+ /* Check if a branch jumps into the middle of an existing block */
+ if ((codeOffset > bb->startOffset) && (bb->lastMIRInsn != NULL) &&
+ (codeOffset <= bb->lastMIRInsn->offset)) {
+ BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb,
+ bb == *immedPredBlockP ?
+ immedPredBlockP : NULL);
+ return newBB;
+ }
}
+ }
- if (split) {
- for (i = 0; i < blockList->numUsed; i++) {
- bb = (BasicBlock *) blockList->elemList[i];
- if (bb->blockType != kDalvikByteCode) continue;
- /* Check if a branch jumps into the middle of an existing block */
- if ((codeOffset > bb->startOffset) && (bb->lastMIRInsn != NULL) &&
- (codeOffset <= bb->lastMIRInsn->offset)) {
- BasicBlock *newBB = splitBlock(cUnit, codeOffset, bb,
- bb == *immedPredBlockP ?
- immedPredBlockP : NULL);
- return newBB;
- }
- }
- }
-
- /* Create a new one */
- bb = oatNewBB(cUnit, kDalvikByteCode, cUnit->numBlocks++);
- oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) bb);
- bb->startOffset = codeOffset;
- cUnit->blockMap.Put(bb->startOffset, bb);
- return bb;
+ /* Create a new one */
+ bb = oatNewBB(cUnit, kDalvikByteCode, cUnit->numBlocks++);
+ oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) bb);
+ bb->startOffset = codeOffset;
+ cUnit->blockMap.Put(bb->startOffset, bb);
+ return bb;
}
/* Dump the CFG into a DOT graph */
void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
{
- FILE* file;
- std::string name(PrettyMethod(cUnit->method_idx, *cUnit->dex_file));
- char startOffset[80];
- sprintf(startOffset, "_%x", cUnit->entryBlock->fallThrough->startOffset);
- char* fileName = (char*) oatNew(cUnit,
- strlen(dirPrefix) +
- name.length() +
- strlen(".dot") + 1, true, kAllocDebugInfo);
- sprintf(fileName, "%s%s%s.dot", dirPrefix, name.c_str(), startOffset);
+ FILE* file;
+ std::string name(PrettyMethod(cUnit->method_idx, *cUnit->dex_file));
+ char startOffset[80];
+ sprintf(startOffset, "_%x", cUnit->entryBlock->fallThrough->startOffset);
+ char* fileName = (char*) oatNew(cUnit, strlen(dirPrefix) +
+ name.length() + strlen(".dot") + 1,
+ true, kAllocDebugInfo);
+ sprintf(fileName, "%s%s%s.dot", dirPrefix, name.c_str(), startOffset);
- /*
- * Convert the special characters into a filesystem- and shell-friendly
- * format.
- */
- int i;
- for (i = strlen(dirPrefix); fileName[i]; i++) {
- if (fileName[i] == '/') {
- fileName[i] = '_';
- } else if (fileName[i] == ';') {
- fileName[i] = '#';
- } else if (fileName[i] == '$') {
- fileName[i] = '+';
- } else if (fileName[i] == '(' || fileName[i] == ')') {
- fileName[i] = '@';
- } else if (fileName[i] == '<' || fileName[i] == '>') {
- fileName[i] = '=';
- }
+ /*
+ * Convert the special characters into a filesystem- and shell-friendly
+ * format.
+ */
+ int i;
+ for (i = strlen(dirPrefix); fileName[i]; i++) {
+ if (fileName[i] == '/') {
+ fileName[i] = '_';
+ } else if (fileName[i] == ';') {
+ fileName[i] = '#';
+ } else if (fileName[i] == '$') {
+ fileName[i] = '+';
+ } else if (fileName[i] == '(' || fileName[i] == ')') {
+ fileName[i] = '@';
+ } else if (fileName[i] == '<' || fileName[i] == '>') {
+ fileName[i] = '=';
}
- file = fopen(fileName, "w");
- if (file == NULL) {
- return;
+ }
+ file = fopen(fileName, "w");
+ if (file == NULL) {
+ return;
+ }
+ fprintf(file, "digraph G {\n");
+
+ fprintf(file, " rankdir=TB\n");
+
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int idx;
+ const GrowableList *blockList = &cUnit->blockList;
+
+ for (idx = 0; idx < numReachableBlocks; idx++) {
+ int blockIdx = cUnit->dfsOrder.elemList[idx];
+ BasicBlock *bb = (BasicBlock *) oatGrowableListGetElement(blockList,
+ blockIdx);
+ if (bb == NULL) break;
+ if (bb->blockType == kEntryBlock) {
+ fprintf(file, " entry [shape=Mdiamond];\n");
+ } else if (bb->blockType == kExitBlock) {
+ fprintf(file, " exit [shape=Mdiamond];\n");
+ } else if (bb->blockType == kDalvikByteCode) {
+ fprintf(file, " block%04x [shape=record,label = \"{ \\\n",
+ bb->startOffset);
+ const MIR *mir;
+ fprintf(file, " {block id %d\\l}%s\\\n", bb->id,
+ bb->firstMIRInsn ? " | " : " ");
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ fprintf(file, " {%04x %s\\l}%s\\\n", mir->offset,
+ mir->ssaRep ? oatFullDisassembler(cUnit, mir) :
+ Instruction::Name(mir->dalvikInsn.opcode),
+ mir->next ? " | " : " ");
+ }
+ fprintf(file, " }\"];\n\n");
+ } else if (bb->blockType == kExceptionHandling) {
+ char blockName[BLOCK_NAME_LEN];
+
+ oatGetBlockName(bb, blockName);
+ fprintf(file, " %s [shape=invhouse];\n", blockName);
}
- fprintf(file, "digraph G {\n");
- fprintf(file, " rankdir=TB\n");
+ char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
- int numReachableBlocks = cUnit->numReachableBlocks;
- int idx;
- const GrowableList *blockList = &cUnit->blockList;
-
- for (idx = 0; idx < numReachableBlocks; idx++) {
- int blockIdx = cUnit->dfsOrder.elemList[idx];
- BasicBlock *bb = (BasicBlock *) oatGrowableListGetElement(blockList,
- blockIdx);
- if (bb == NULL) break;
- if (bb->blockType == kEntryBlock) {
- fprintf(file, " entry [shape=Mdiamond];\n");
- } else if (bb->blockType == kExitBlock) {
- fprintf(file, " exit [shape=Mdiamond];\n");
- } else if (bb->blockType == kDalvikByteCode) {
- fprintf(file, " block%04x [shape=record,label = \"{ \\\n",
- bb->startOffset);
- const MIR *mir;
- fprintf(file, " {block id %d\\l}%s\\\n", bb->id,
- bb->firstMIRInsn ? " | " : " ");
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- fprintf(file, " {%04x %s\\l}%s\\\n", mir->offset,
- mir->ssaRep ? oatFullDisassembler(cUnit, mir) : Instruction::Name(mir->dalvikInsn.opcode),
- mir->next ? " | " : " ");
- }
- fprintf(file, " }\"];\n\n");
- } else if (bb->blockType == kExceptionHandling) {
- char blockName[BLOCK_NAME_LEN];
-
- oatGetBlockName(bb, blockName);
- fprintf(file, " %s [shape=invhouse];\n", blockName);
- }
-
- char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
-
- if (bb->taken) {
- oatGetBlockName(bb, blockName1);
- oatGetBlockName(bb->taken, blockName2);
- fprintf(file, " %s:s -> %s:n [style=dotted]\n",
- blockName1, blockName2);
- }
- if (bb->fallThrough) {
- oatGetBlockName(bb, blockName1);
- oatGetBlockName(bb->fallThrough, blockName2);
- fprintf(file, " %s:s -> %s:n\n", blockName1, blockName2);
- }
-
- if (bb->successorBlockList.blockListType != kNotUsed) {
- fprintf(file, " succ%04x [shape=%s,label = \"{ \\\n",
- bb->startOffset,
- (bb->successorBlockList.blockListType == kCatch) ?
- "Mrecord" : "record");
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
- &iterator);
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
-
- int succId = 0;
- while (true) {
- if (successorBlockInfo == NULL) break;
-
- BasicBlock *destBlock = successorBlockInfo->block;
- SuccessorBlockInfo *nextSuccessorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
-
- fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
- succId++,
- successorBlockInfo->key,
- destBlock->startOffset,
- (nextSuccessorBlockInfo != NULL) ? " | " : " ");
-
- successorBlockInfo = nextSuccessorBlockInfo;
- }
- fprintf(file, " }\"];\n\n");
-
- oatGetBlockName(bb, blockName1);
- fprintf(file, " %s:s -> succ%04x:n [style=dashed]\n",
- blockName1, bb->startOffset);
-
- if (bb->successorBlockList.blockListType == kPackedSwitch ||
- bb->successorBlockList.blockListType == kSparseSwitch) {
-
- oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
- &iterator);
-
- succId = 0;
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *)
- oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
-
- BasicBlock *destBlock = successorBlockInfo->block;
-
- oatGetBlockName(destBlock, blockName2);
- fprintf(file, " succ%04x:f%d:e -> %s:n\n",
- bb->startOffset, succId++,
- blockName2);
- }
- }
- }
- fprintf(file, "\n");
-
- /* Display the dominator tree */
- oatGetBlockName(bb, blockName1);
- fprintf(file, " cfg%s [label=\"%s\", shape=none];\n",
- blockName1, blockName1);
- if (bb->iDom) {
- oatGetBlockName(bb->iDom, blockName2);
- fprintf(file, " cfg%s:s -> cfg%s:n\n\n",
- blockName2, blockName1);
- }
+ if (bb->taken) {
+ oatGetBlockName(bb, blockName1);
+ oatGetBlockName(bb->taken, blockName2);
+ fprintf(file, " %s:s -> %s:n [style=dotted]\n",
+ blockName1, blockName2);
}
- fprintf(file, "}\n");
- fclose(file);
+ if (bb->fallThrough) {
+ oatGetBlockName(bb, blockName1);
+ oatGetBlockName(bb->fallThrough, blockName2);
+ fprintf(file, " %s:s -> %s:n\n", blockName1, blockName2);
+ }
+
+ if (bb->successorBlockList.blockListType != kNotUsed) {
+ fprintf(file, " succ%04x [shape=%s,label = \"{ \\\n",
+ bb->startOffset,
+ (bb->successorBlockList.blockListType == kCatch) ?
+ "Mrecord" : "record");
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
+ &iterator);
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+
+ int succId = 0;
+ while (true) {
+ if (successorBlockInfo == NULL) break;
+
+ BasicBlock *destBlock = successorBlockInfo->block;
+ SuccessorBlockInfo *nextSuccessorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+
+ fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
+ succId++,
+ successorBlockInfo->key,
+ destBlock->startOffset,
+ (nextSuccessorBlockInfo != NULL) ? " | " : " ");
+
+ successorBlockInfo = nextSuccessorBlockInfo;
+ }
+ fprintf(file, " }\"];\n\n");
+
+ oatGetBlockName(bb, blockName1);
+ fprintf(file, " %s:s -> succ%04x:n [style=dashed]\n",
+ blockName1, bb->startOffset);
+
+ if (bb->successorBlockList.blockListType == kPackedSwitch ||
+ bb->successorBlockList.blockListType == kSparseSwitch) {
+
+ oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
+ &iterator);
+
+ succId = 0;
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo = (SuccessorBlockInfo *)
+ oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+
+ BasicBlock *destBlock = successorBlockInfo->block;
+
+ oatGetBlockName(destBlock, blockName2);
+ fprintf(file, " succ%04x:f%d:e -> %s:n\n", bb->startOffset,
+ succId++, blockName2);
+ }
+ }
+ }
+ fprintf(file, "\n");
+
+ /* Display the dominator tree */
+ oatGetBlockName(bb, blockName1);
+ fprintf(file, " cfg%s [label=\"%s\", shape=none];\n",
+ blockName1, blockName1);
+ if (bb->iDom) {
+ oatGetBlockName(bb->iDom, blockName2);
+ fprintf(file, " cfg%s:s -> cfg%s:n\n\n", blockName2, blockName1);
+ }
+ }
+ fprintf(file, "}\n");
+ fclose(file);
}
/* Verify if all the successor is connected with all the claimed predecessors */
bool verifyPredInfo(CompilationUnit* cUnit, BasicBlock* bb)
{
- GrowableListIterator iter;
+ GrowableListIterator iter;
- oatGrowableListIteratorInit(bb->predecessors, &iter);
- while (true) {
- BasicBlock *predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- if (!predBB) break;
- bool found = false;
- if (predBB->taken == bb) {
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock *predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ if (!predBB) break;
+ bool found = false;
+ if (predBB->taken == bb) {
+ found = true;
+ } else if (predBB->fallThrough == bb) {
+ found = true;
+ } else if (predBB->successorBlockList.blockListType != kNotUsed) {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&predBB->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo = (SuccessorBlockInfo *)
+ oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock *succBB = successorBlockInfo->block;
+ if (succBB == bb) {
found = true;
- } else if (predBB->fallThrough == bb) {
- found = true;
- } else if (predBB->successorBlockList.blockListType != kNotUsed) {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&predBB->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *)
- oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock *succBB = successorBlockInfo->block;
- if (succBB == bb) {
- found = true;
- break;
- }
- }
+ break;
}
- if (found == false) {
- char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
- oatGetBlockName(bb, blockName1);
- oatGetBlockName(predBB, blockName2);
- oatDumpCFG(cUnit, "/sdcard/cfg/");
- LOG(FATAL) << "Successor " << blockName1 << "not found from "
- << blockName2;
- }
+ }
}
- return true;
+ if (found == false) {
+ char blockName1[BLOCK_NAME_LEN], blockName2[BLOCK_NAME_LEN];
+ oatGetBlockName(bb, blockName1);
+ oatGetBlockName(predBB, blockName2);
+ oatDumpCFG(cUnit, "/sdcard/cfg/");
+      LOG(FATAL) << "Successor " << blockName1 << " not found from "
+ << blockName2;
+ }
+ }
+ return true;
}
/* Identify code range in try blocks and set up the empty catch blocks */
void processTryCatchBlocks(CompilationUnit* cUnit)
{
- const DexFile::CodeItem* code_item = cUnit->code_item;
- int triesSize = code_item->tries_size_;
- int offset;
+ const DexFile::CodeItem* code_item = cUnit->code_item;
+ int triesSize = code_item->tries_size_;
+ int offset;
- if (triesSize == 0) {
- return;
+ if (triesSize == 0) {
+ return;
+ }
+
+ ArenaBitVector* tryBlockAddr = cUnit->tryBlockAddr;
+
+ for (int i = 0; i < triesSize; i++) {
+ const DexFile::TryItem* pTry =
+ DexFile::GetTryItems(*code_item, i);
+ int startOffset = pTry->start_addr_;
+ int endOffset = startOffset + pTry->insn_count_;
+ for (offset = startOffset; offset < endOffset; offset++) {
+ oatSetBit(cUnit, tryBlockAddr, offset);
}
+ }
- ArenaBitVector* tryBlockAddr = cUnit->tryBlockAddr;
-
- for (int i = 0; i < triesSize; i++) {
- const DexFile::TryItem* pTry =
- DexFile::GetTryItems(*code_item, i);
- int startOffset = pTry->start_addr_;
- int endOffset = startOffset + pTry->insn_count_;
- for (offset = startOffset; offset < endOffset; offset++) {
- oatSetBit(cUnit, tryBlockAddr, offset);
- }
+ // Iterate over each of the handlers to enqueue the empty Catch blocks
+ const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ for (uint32_t idx = 0; idx < handlers_size; idx++) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ uint32_t address = iterator.GetHandlerAddress();
+ findBlock(cUnit, address, false /* split */, true /*create*/,
+ /* immedPredBlockP */ NULL);
}
-
- // Iterate over each of the handlers to enqueue the empty Catch blocks
- const byte* handlers_ptr =
- DexFile::GetCatchHandlerData(*code_item, 0);
- uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
- for (uint32_t idx = 0; idx < handlers_size; idx++) {
- CatchHandlerIterator iterator(handlers_ptr);
- for (; iterator.HasNext(); iterator.Next()) {
- uint32_t address = iterator.GetHandlerAddress();
- findBlock(cUnit, address, false /* split */, true /*create*/,
- /* immedPredBlockP */ NULL);
- }
- handlers_ptr = iterator.EndDataPointer();
- }
+ handlers_ptr = iterator.EndDataPointer();
+ }
}
/* Process instructions with the kBranch flag */
BasicBlock* processCanBranch(CompilationUnit* cUnit, BasicBlock* curBlock,
- MIR* insn, int curOffset, int width, int flags,
- const u2* codePtr, const u2* codeEnd)
+ MIR* insn, int curOffset, int width, int flags,
+ const u2* codePtr, const u2* codeEnd)
{
- int target = curOffset;
- switch (insn->dalvikInsn.opcode) {
- case Instruction::GOTO:
- case Instruction::GOTO_16:
- case Instruction::GOTO_32:
- target += (int) insn->dalvikInsn.vA;
- break;
- case Instruction::IF_EQ:
- case Instruction::IF_NE:
- case Instruction::IF_LT:
- case Instruction::IF_GE:
- case Instruction::IF_GT:
- case Instruction::IF_LE:
- target += (int) insn->dalvikInsn.vC;
- break;
- case Instruction::IF_EQZ:
- case Instruction::IF_NEZ:
- case Instruction::IF_LTZ:
- case Instruction::IF_GEZ:
- case Instruction::IF_GTZ:
- case Instruction::IF_LEZ:
- target += (int) insn->dalvikInsn.vB;
- break;
- default:
- LOG(FATAL) << "Unexpected opcode(" << (int)insn->dalvikInsn.opcode
- << ") with kBranch set";
- }
- BasicBlock *takenBlock = findBlock(cUnit, target,
- /* split */
- true,
- /* create */
- true,
- /* immedPredBlockP */
- &curBlock);
- curBlock->taken = takenBlock;
- oatInsertGrowableList(cUnit, takenBlock->predecessors, (intptr_t)curBlock);
+ int target = curOffset;
+ switch (insn->dalvikInsn.opcode) {
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ target += (int) insn->dalvikInsn.vA;
+ break;
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ target += (int) insn->dalvikInsn.vC;
+ break;
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ target += (int) insn->dalvikInsn.vB;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode(" << (int)insn->dalvikInsn.opcode
+ << ") with kBranch set";
+ }
+ BasicBlock *takenBlock = findBlock(cUnit, target,
+ /* split */
+ true,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ &curBlock);
+ curBlock->taken = takenBlock;
+ oatInsertGrowableList(cUnit, takenBlock->predecessors, (intptr_t)curBlock);
- /* Always terminate the current block for conditional branches */
- if (flags & Instruction::kContinue) {
- BasicBlock *fallthroughBlock = findBlock(cUnit,
- curOffset + width,
- /*
- * If the method is processed
- * in sequential order from the
- * beginning, we don't need to
- * specify split for continue
- * blocks. However, this
- * routine can be called by
- * compileLoop, which starts
- * parsing the method from an
- * arbitrary address in the
- * method body.
- */
- true,
- /* create */
- true,
- /* immedPredBlockP */
- &curBlock);
- curBlock->fallThrough = fallthroughBlock;
- oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
- (intptr_t)curBlock);
- } else if (codePtr < codeEnd) {
- /* Create a fallthrough block for real instructions (incl. NOP) */
- if (contentIsInsn(codePtr)) {
- findBlock(cUnit, curOffset + width,
- /* split */
- false,
- /* create */
- true,
- /* immedPredBlockP */
- NULL);
- }
+ /* Always terminate the current block for conditional branches */
+ if (flags & Instruction::kContinue) {
+ BasicBlock *fallthroughBlock = findBlock(cUnit,
+ curOffset + width,
+ /*
+ * If the method is processed
+ * in sequential order from the
+ * beginning, we don't need to
+ * specify split for continue
+ * blocks. However, this
+ * routine can be called by
+ * compileLoop, which starts
+ * parsing the method from an
+ * arbitrary address in the
+ * method body.
+ */
+ true,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ &curBlock);
+ curBlock->fallThrough = fallthroughBlock;
+ oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
+ (intptr_t)curBlock);
+ } else if (codePtr < codeEnd) {
+ /* Create a fallthrough block for real instructions (incl. NOP) */
+ if (contentIsInsn(codePtr)) {
+ findBlock(cUnit, curOffset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ NULL);
}
- return curBlock;
+ }
+ return curBlock;
}
/* Process instructions with the kSwitch flag */
void processCanSwitch(CompilationUnit* cUnit, BasicBlock* curBlock,
MIR* insn, int curOffset, int width, int flags)
{
- u2* switchData= (u2 *) (cUnit->insns + curOffset +
- insn->dalvikInsn.vB);
- int size;
- int* keyTable;
- int* targetTable;
- int i;
- int firstKey;
+ u2* switchData= (u2 *) (cUnit->insns + curOffset + insn->dalvikInsn.vB);
+ int size;
+ int* keyTable;
+ int* targetTable;
+ int i;
+ int firstKey;
- /*
- * Packed switch data format:
- * ushort ident = 0x0100 magic value
- * ushort size number of entries in the table
- * int first_key first (and lowest) switch case value
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (4+size*2) 16-bit code units.
- */
- if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
- DCHECK_EQ(static_cast<int>(switchData[0]), static_cast<int>(Instruction::kPackedSwitchSignature));
- size = switchData[1];
- firstKey = switchData[2] | (switchData[3] << 16);
- targetTable = (int *) &switchData[4];
- keyTable = NULL; // Make the compiler happy
- /*
- * Sparse switch data format:
- * ushort ident = 0x0200 magic value
- * ushort size number of entries in the table; > 0
- * int keys[size] keys, sorted low-to-high; 32-bit aligned
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (2+size*4) 16-bit code units.
- */
- } else {
- DCHECK_EQ(static_cast<int>(switchData[0]), static_cast<int>(Instruction::kSparseSwitchSignature));
- size = switchData[1];
- keyTable = (int *) &switchData[2];
- targetTable = (int *) &switchData[2 + size*2];
- firstKey = 0; // To make the compiler happy
- }
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
+ if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
+ DCHECK_EQ(static_cast<int>(switchData[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ size = switchData[1];
+ firstKey = switchData[2] | (switchData[3] << 16);
+ targetTable = (int *) &switchData[4];
+ keyTable = NULL; // Make the compiler happy
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
+ } else {
+ DCHECK_EQ(static_cast<int>(switchData[0]),
+ static_cast<int>(Instruction::kSparseSwitchSignature));
+ size = switchData[1];
+ keyTable = (int *) &switchData[2];
+ targetTable = (int *) &switchData[2 + size*2];
+ firstKey = 0; // To make the compiler happy
+ }
- if (curBlock->successorBlockList.blockListType != kNotUsed) {
- LOG(FATAL) << "Successor block list already in use: " <<
- (int)curBlock->successorBlockList.blockListType;
- }
- curBlock->successorBlockList.blockListType =
+ if (curBlock->successorBlockList.blockListType != kNotUsed) {
+ LOG(FATAL) << "Successor block list already in use: "
+ << (int)curBlock->successorBlockList.blockListType;
+ }
+ curBlock->successorBlockList.blockListType =
+ (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+ kPackedSwitch : kSparseSwitch;
+ oatInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, size,
+ kListSuccessorBlocks);
+
+ for (i = 0; i < size; i++) {
+ BasicBlock *caseBlock = findBlock(cUnit, curOffset + targetTable[i],
+ /* split */
+ true,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ &curBlock);
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatNew(cUnit, sizeof(SuccessorBlockInfo),
+ false, kAllocSuccessor);
+ successorBlockInfo->block = caseBlock;
+ successorBlockInfo->key =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
- kPackedSwitch : kSparseSwitch;
- oatInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, size,
- kListSuccessorBlocks);
-
- for (i = 0; i < size; i++) {
- BasicBlock *caseBlock = findBlock(cUnit, curOffset + targetTable[i],
- /* split */
- true,
- /* create */
- true,
- /* immedPredBlockP */
- &curBlock);
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatNew(cUnit, sizeof(SuccessorBlockInfo),
- false, kAllocSuccessor);
- successorBlockInfo->block = caseBlock;
- successorBlockInfo->key = (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH)?
- firstKey + i : keyTable[i];
- oatInsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
- (intptr_t) successorBlockInfo);
- oatInsertGrowableList(cUnit, caseBlock->predecessors,
- (intptr_t)curBlock);
- }
-
- /* Fall-through case */
- BasicBlock* fallthroughBlock = findBlock(cUnit,
- curOffset + width,
- /* split */
- false,
- /* create */
- true,
- /* immedPredBlockP */
- NULL);
- curBlock->fallThrough = fallthroughBlock;
- oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
+ firstKey + i : keyTable[i];
+ oatInsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
+ (intptr_t) successorBlockInfo);
+ oatInsertGrowableList(cUnit, caseBlock->predecessors,
(intptr_t)curBlock);
+ }
+
+ /* Fall-through case */
+ BasicBlock* fallthroughBlock = findBlock(cUnit,
+ curOffset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ NULL);
+ curBlock->fallThrough = fallthroughBlock;
+ oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
+ (intptr_t)curBlock);
}
/* Process instructions with the kThrow flag */
@@ -653,77 +651,76 @@
ArenaBitVector* tryBlockAddr, const u2* codePtr,
const u2* codeEnd)
{
- const DexFile::CodeItem* code_item = cUnit->code_item;
+ const DexFile::CodeItem* code_item = cUnit->code_item;
- /* In try block */
- if (oatIsBitSet(tryBlockAddr, curOffset)) {
- CatchHandlerIterator iterator(*code_item, curOffset);
+ /* In try block */
+ if (oatIsBitSet(tryBlockAddr, curOffset)) {
+ CatchHandlerIterator iterator(*code_item, curOffset);
- if (curBlock->successorBlockList.blockListType != kNotUsed) {
- LOG(FATAL) << "Successor block list already in use: " <<
- (int)curBlock->successorBlockList.blockListType;
- }
-
- curBlock->successorBlockList.blockListType = kCatch;
- oatInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, 2,
- kListSuccessorBlocks);
-
- for (;iterator.HasNext(); iterator.Next()) {
- BasicBlock *catchBlock = findBlock(cUnit, iterator.GetHandlerAddress(),
- false /* split*/,
- false /* creat */,
- NULL /* immedPredBlockP */);
- catchBlock->catchEntry = true;
- SuccessorBlockInfo *successorBlockInfo = (SuccessorBlockInfo *)
- oatNew(cUnit, sizeof(SuccessorBlockInfo), false,
- kAllocSuccessor);
- successorBlockInfo->block = catchBlock;
- successorBlockInfo->key = iterator.GetHandlerTypeIndex();
- oatInsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
- (intptr_t) successorBlockInfo);
- oatInsertGrowableList(cUnit, catchBlock->predecessors,
- (intptr_t)curBlock);
- }
- } else {
- BasicBlock *ehBlock = oatNewBB(cUnit, kExceptionHandling,
- cUnit->numBlocks++);
- curBlock->taken = ehBlock;
- oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) ehBlock);
- ehBlock->startOffset = curOffset;
- oatInsertGrowableList(cUnit, ehBlock->predecessors, (intptr_t)curBlock);
+ if (curBlock->successorBlockList.blockListType != kNotUsed) {
+ LOG(FATAL) << "Successor block list already in use: "
+ << (int)curBlock->successorBlockList.blockListType;
}
- /*
- * Force the current block to terminate.
- *
- * Data may be present before codeEnd, so we need to parse it to know
- * whether it is code or data.
- */
- if (codePtr < codeEnd) {
- /* Create a fallthrough block for real instructions (incl. NOP) */
- if (contentIsInsn(codePtr)) {
- BasicBlock *fallthroughBlock = findBlock(cUnit,
- curOffset + width,
- /* split */
- false,
- /* create */
- true,
- /* immedPredBlockP */
- NULL);
- /*
- * THROW is an unconditional branch. NOTE:
- * THROW_VERIFICATION_ERROR is also an unconditional
- * branch, but we shouldn't treat it as such until we have
- * a dead code elimination pass (which won't be important
- * until inlining w/ constant propogation is implemented.
- */
- if (insn->dalvikInsn.opcode != Instruction::THROW) {
- curBlock->fallThrough = fallthroughBlock;
- oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
- (intptr_t)curBlock);
- }
- }
+ curBlock->successorBlockList.blockListType = kCatch;
+ oatInitGrowableList(cUnit, &curBlock->successorBlockList.blocks, 2,
+ kListSuccessorBlocks);
+
+ for (;iterator.HasNext(); iterator.Next()) {
+ BasicBlock *catchBlock = findBlock(cUnit, iterator.GetHandlerAddress(),
+                                             false /* split */,
+                                             false /* create */,
+ NULL /* immedPredBlockP */);
+ catchBlock->catchEntry = true;
+ SuccessorBlockInfo *successorBlockInfo = (SuccessorBlockInfo *)
+ oatNew(cUnit, sizeof(SuccessorBlockInfo), false, kAllocSuccessor);
+ successorBlockInfo->block = catchBlock;
+ successorBlockInfo->key = iterator.GetHandlerTypeIndex();
+ oatInsertGrowableList(cUnit, &curBlock->successorBlockList.blocks,
+ (intptr_t) successorBlockInfo);
+ oatInsertGrowableList(cUnit, catchBlock->predecessors,
+ (intptr_t)curBlock);
}
+ } else {
+ BasicBlock *ehBlock = oatNewBB(cUnit, kExceptionHandling,
+ cUnit->numBlocks++);
+ curBlock->taken = ehBlock;
+ oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t) ehBlock);
+ ehBlock->startOffset = curOffset;
+ oatInsertGrowableList(cUnit, ehBlock->predecessors, (intptr_t)curBlock);
+ }
+
+ /*
+ * Force the current block to terminate.
+ *
+ * Data may be present before codeEnd, so we need to parse it to know
+ * whether it is code or data.
+ */
+ if (codePtr < codeEnd) {
+ /* Create a fallthrough block for real instructions (incl. NOP) */
+ if (contentIsInsn(codePtr)) {
+ BasicBlock *fallthroughBlock = findBlock(cUnit,
+ curOffset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ NULL);
+ /*
+ * THROW is an unconditional branch. NOTE:
+ * THROW_VERIFICATION_ERROR is also an unconditional
+ * branch, but we shouldn't treat it as such until we have
+       * a dead code elimination pass (which won't be important
+       * until inlining w/ constant propagation is implemented).
+ */
+ if (insn->dalvikInsn.opcode != Instruction::THROW) {
+ curBlock->fallThrough = fallthroughBlock;
+ oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
+ (intptr_t)curBlock);
+ }
+ }
+ }
}
void oatInit(CompilationUnit* cUnit, const Compiler& compiler) {
@@ -741,383 +738,387 @@
const ClassLoader* class_loader,
const DexFile& dex_file)
{
- VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
+ VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
- const u2* codePtr = code_item->insns_;
- const u2* codeEnd = code_item->insns_ + code_item->insns_size_in_code_units_;
- int numBlocks = 0;
- unsigned int curOffset = 0;
+ const u2* codePtr = code_item->insns_;
+ const u2* codeEnd = code_item->insns_ + code_item->insns_size_in_code_units_;
+ int numBlocks = 0;
+ unsigned int curOffset = 0;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- UniquePtr<CompilationUnit> cUnit(new CompilationUnit);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ UniquePtr<CompilationUnit> cUnit(new CompilationUnit);
- oatInit(cUnit.get(), compiler);
+ oatInit(cUnit.get(), compiler);
- cUnit->compiler = &compiler;
- cUnit->class_linker = class_linker;
- cUnit->dex_file = &dex_file;
- cUnit->dex_cache = class_linker->FindDexCache(dex_file);
- cUnit->method_idx = method_idx;
- cUnit->code_item = code_item;
- cUnit->access_flags = access_flags;
- cUnit->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- cUnit->instructionSet = compiler.GetInstructionSet();
- cUnit->insns = code_item->insns_;
- cUnit->insnsSize = code_item->insns_size_in_code_units_;
- cUnit->numIns = code_item->ins_size_;
- cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
- cUnit->numOuts = code_item->outs_size_;
- /* Adjust this value accordingly once inlining is performed */
- cUnit->numDalvikRegisters = code_item->registers_size_;
- // TODO: set this from command line
- cUnit->compilerFlipMatch = false;
- bool useMatch = !cUnit->compilerMethodMatch.empty();
- bool match = useMatch && (cUnit->compilerFlipMatch ^
- (PrettyMethod(method_idx, dex_file).find(cUnit->compilerMethodMatch) != std::string::npos));
- if (!useMatch || match) {
- cUnit->disableOpt = kCompilerOptimizerDisableFlags;
- cUnit->enableDebug = kCompilerDebugFlags;
- cUnit->printMe = VLOG_IS_ON(compiler) || (cUnit->enableDebug & (1 << kDebugVerbose));
- }
- if (cUnit->instructionSet == kX86) {
- // Disable optimizations on X86 for now
- cUnit->disableOpt = -1;
- }
- /* Are we generating code for the debugger? */
- if (compiler.IsDebuggingSupported()) {
- cUnit->genDebugger = true;
- // Yes, disable most optimizations
- cUnit->disableOpt |= (
- (1 << kLoadStoreElimination) |
- (1 << kLoadHoisting) |
- (1 << kSuppressLoads) |
- (1 << kPromoteRegs) |
- (1 << kBBOpt) |
- (1 << kMatch) |
- (1 << kTrackLiveTemps));
+ cUnit->compiler = &compiler;
+ cUnit->class_linker = class_linker;
+ cUnit->dex_file = &dex_file;
+ cUnit->dex_cache = class_linker->FindDexCache(dex_file);
+ cUnit->method_idx = method_idx;
+ cUnit->code_item = code_item;
+ cUnit->access_flags = access_flags;
+ cUnit->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+ cUnit->instructionSet = compiler.GetInstructionSet();
+ cUnit->insns = code_item->insns_;
+ cUnit->insnsSize = code_item->insns_size_in_code_units_;
+ cUnit->numIns = code_item->ins_size_;
+ cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
+ cUnit->numOuts = code_item->outs_size_;
+ /* Adjust this value accordingly once inlining is performed */
+ cUnit->numDalvikRegisters = code_item->registers_size_;
+ // TODO: set this from command line
+ cUnit->compilerFlipMatch = false;
+ bool useMatch = !cUnit->compilerMethodMatch.empty();
+ bool match = useMatch && (cUnit->compilerFlipMatch ^
+ (PrettyMethod(method_idx, dex_file).find(cUnit->compilerMethodMatch) !=
+ std::string::npos));
+ if (!useMatch || match) {
+ cUnit->disableOpt = kCompilerOptimizerDisableFlags;
+ cUnit->enableDebug = kCompilerDebugFlags;
+ cUnit->printMe = VLOG_IS_ON(compiler) ||
+ (cUnit->enableDebug & (1 << kDebugVerbose));
+ }
+ if (cUnit->instructionSet == kX86) {
+ // Disable optimizations on X86 for now
+ cUnit->disableOpt = -1;
+ }
+ /* Are we generating code for the debugger? */
+ if (compiler.IsDebuggingSupported()) {
+ cUnit->genDebugger = true;
+ // Yes, disable most optimizations
+ cUnit->disableOpt |= (
+ (1 << kLoadStoreElimination) |
+ (1 << kLoadHoisting) |
+ (1 << kSuppressLoads) |
+ (1 << kPromoteRegs) |
+ (1 << kBBOpt) |
+ (1 << kMatch) |
+ (1 << kTrackLiveTemps));
+ }
+
+ /* Gathering opcode stats? */
+ if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+ cUnit->opcodeCount = (int*)oatNew(cUnit.get(),
+ kNumPackedOpcodes * sizeof(int), true, kAllocMisc);
+ }
+
+ /* Assume non-throwing leaf */
+ cUnit->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
+
+ /* Initialize the block list, estimate size based on insnsSize */
+ oatInitGrowableList(cUnit.get(), &cUnit->blockList, cUnit->insnsSize,
+ kListBlockList);
+
+ /* Initialize the switchTables list */
+ oatInitGrowableList(cUnit.get(), &cUnit->switchTables, 4,
+ kListSwitchTables);
+
+  /* Initialize the fillArrayData list */
+ oatInitGrowableList(cUnit.get(), &cUnit->fillArrayData, 4,
+ kListFillArrayData);
+
+  /* Initialize the throwLaunchpads list, estimate size based on insnsSize */
+ oatInitGrowableList(cUnit.get(), &cUnit->throwLaunchpads, cUnit->insnsSize,
+ kListThrowLaunchPads);
+
+  /* Initialize the intrinsicLaunchpads list */
+ oatInitGrowableList(cUnit.get(), &cUnit->intrinsicLaunchpads, 4,
+ kListMisc);
+
+
+  /* Initialize the suspendLaunchpads list */
+ oatInitGrowableList(cUnit.get(), &cUnit->suspendLaunchpads, 2048,
+ kListSuspendLaunchPads);
+
+ /* Allocate the bit-vector to track the beginning of basic blocks */
+ ArenaBitVector *tryBlockAddr = oatAllocBitVector(cUnit.get(),
+ cUnit->insnsSize,
+ true /* expandable */);
+ cUnit->tryBlockAddr = tryBlockAddr;
+
+ /* Create the default entry and exit blocks and enter them to the list */
+ BasicBlock *entryBlock = oatNewBB(cUnit.get(), kEntryBlock, numBlocks++);
+ BasicBlock *exitBlock = oatNewBB(cUnit.get(), kExitBlock, numBlocks++);
+
+ cUnit->entryBlock = entryBlock;
+ cUnit->exitBlock = exitBlock;
+
+ oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) entryBlock);
+ oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) exitBlock);
+
+ /* Current block to record parsed instructions */
+ BasicBlock *curBlock = oatNewBB(cUnit.get(), kDalvikByteCode, numBlocks++);
+ curBlock->startOffset = 0;
+ oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) curBlock);
+ /* Add first block to the fast lookup cache */
+ cUnit->blockMap.Put(curBlock->startOffset, curBlock);
+ entryBlock->fallThrough = curBlock;
+ oatInsertGrowableList(cUnit.get(), curBlock->predecessors,
+ (intptr_t)entryBlock);
+
+ /*
+   * Store back the number of blocks, since new blocks may be created when
+   * accessing cUnit.
+ */
+ cUnit->numBlocks = numBlocks;
+
+ /* Identify code range in try blocks and set up the empty catch blocks */
+ processTryCatchBlocks(cUnit.get());
+
+ /* Set up for simple method detection */
+ int numPatterns = sizeof(specialPatterns)/sizeof(specialPatterns[0]);
+ bool livePattern = (numPatterns > 0) && !(cUnit->disableOpt & (1 << kMatch));
+ bool* deadPattern = (bool*)oatNew(cUnit.get(), sizeof(bool) * numPatterns,
+ kAllocMisc);
+ SpecialCaseHandler specialCase = kNoHandler;
+ int patternPos = 0;
+
+ /* Parse all instructions and put them into containing basic blocks */
+ while (codePtr < codeEnd) {
+ MIR *insn = (MIR *) oatNew(cUnit.get(), sizeof(MIR), true, kAllocMIR);
+ insn->offset = curOffset;
+ int width = parseInsn(cUnit.get(), codePtr, &insn->dalvikInsn, false);
+ insn->width = width;
+ Instruction::Code opcode = insn->dalvikInsn.opcode;
+ if (cUnit->opcodeCount != NULL) {
+ cUnit->opcodeCount[static_cast<int>(opcode)]++;
}
- /* Gathering opcode stats? */
- if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
- cUnit->opcodeCount = (int*)oatNew(cUnit.get(),
- kNumPackedOpcodes * sizeof(int), true, kAllocMisc);
+ /* Terminate when the data section is seen */
+ if (width == 0)
+ break;
+
+ /* Possible simple method? */
+ if (livePattern) {
+ livePattern = false;
+ specialCase = kNoHandler;
+ for (int i = 0; i < numPatterns; i++) {
+ if (!deadPattern[i]) {
+ if (specialPatterns[i].opcodes[patternPos] == opcode) {
+ livePattern = true;
+ specialCase = specialPatterns[i].handlerCode;
+ } else {
+ deadPattern[i] = true;
+ }
+ }
+ }
+ patternPos++;
}
- /* Assume non-throwing leaf */
- cUnit->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
+ oatAppendMIR(curBlock, insn);
- /* Initialize the block list, estimate size based on insnsSize */
- oatInitGrowableList(cUnit.get(), &cUnit->blockList, cUnit->insnsSize,
- kListBlockList);
+ codePtr += width;
+ int flags = Instruction::Flags(insn->dalvikInsn.opcode);
- /* Initialize the switchTables list */
- oatInitGrowableList(cUnit.get(), &cUnit->switchTables, 4,
- kListSwitchTables);
+ int dfFlags = oatDataFlowAttributes[insn->dalvikInsn.opcode];
- /* Intialize the fillArrayData list */
- oatInitGrowableList(cUnit.get(), &cUnit->fillArrayData, 4,
- kListFillArrayData);
+ if (dfFlags & DF_HAS_DEFS) {
+ cUnit->defCount += (dfFlags & DF_DA_WIDE) ? 2 : 1;
+ }
- /* Intialize the throwLaunchpads list, estimate size based on insnsSize */
- oatInitGrowableList(cUnit.get(), &cUnit->throwLaunchpads, cUnit->insnsSize,
- kListThrowLaunchPads);
+ if (flags & Instruction::kBranch) {
+ curBlock = processCanBranch(cUnit.get(), curBlock, insn, curOffset,
+ width, flags, codePtr, codeEnd);
+ } else if (flags & Instruction::kReturn) {
+ curBlock->fallThrough = exitBlock;
+ oatInsertGrowableList(cUnit.get(), exitBlock->predecessors,
+ (intptr_t)curBlock);
+ /*
+ * Terminate the current block if there are instructions
+ * afterwards.
+ */
+ if (codePtr < codeEnd) {
+ /*
+ * Create a fallthrough block for real instructions
+ * (incl. NOP).
+ */
+ if (contentIsInsn(codePtr)) {
+ findBlock(cUnit.get(), curOffset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ NULL);
+ }
+ }
+ } else if (flags & Instruction::kThrow) {
+ processCanThrow(cUnit.get(), curBlock, insn, curOffset, width, flags,
+ tryBlockAddr, codePtr, codeEnd);
+ } else if (flags & Instruction::kSwitch) {
+ processCanSwitch(cUnit.get(), curBlock, insn, curOffset, width, flags);
+ }
+ curOffset += width;
+ BasicBlock *nextBlock = findBlock(cUnit.get(), curOffset,
+ /* split */
+ false,
+ /* create */
+ false,
+ /* immedPredBlockP */
+ NULL);
+ if (nextBlock) {
+ /*
+ * The next instruction could be the target of a previously parsed
+ * forward branch so a block is already created. If the current
+ * instruction is not an unconditional branch, connect them through
+ * the fall-through link.
+ */
+ DCHECK(curBlock->fallThrough == NULL ||
+ curBlock->fallThrough == nextBlock ||
+ curBlock->fallThrough == exitBlock);
- /* Intialize the instrinsicLaunchpads list */
- oatInitGrowableList(cUnit.get(), &cUnit->intrinsicLaunchpads, 4,
- kListMisc);
+ if ((curBlock->fallThrough == NULL) && (flags & Instruction::kContinue)) {
+ curBlock->fallThrough = nextBlock;
+ oatInsertGrowableList(cUnit.get(), nextBlock->predecessors,
+ (intptr_t)curBlock);
+ }
+ curBlock = nextBlock;
+ }
+ }
+ if (!(cUnit->disableOpt & (1 << kSkipLargeMethodOptimization))) {
+ if ((cUnit->numBlocks > MANY_BLOCKS) ||
+ ((cUnit->numBlocks > MANY_BLOCKS_INITIALIZER) &&
+ PrettyMethod(method_idx, dex_file, false).find("init>") !=
+ std::string::npos)) {
+ cUnit->qdMode = true;
+ }
+ }
- /* Intialize the suspendLaunchpads list */
- oatInitGrowableList(cUnit.get(), &cUnit->suspendLaunchpads, 2048,
- kListSuspendLaunchPads);
+ if (cUnit->qdMode) {
+ cUnit->disableDataflow = true;
+ // Disable optimization which require dataflow/ssa
+ cUnit->disableOpt |=
+ (1 << kNullCheckElimination) |
+ (1 << kBBOpt) |
+ (1 << kPromoteRegs);
+ if (cUnit->printMe) {
+ LOG(INFO) << "QD mode enabled: "
+ << PrettyMethod(method_idx, dex_file)
+ << " too big: " << cUnit->numBlocks;
+ }
+ }
- /* Allocate the bit-vector to track the beginning of basic blocks */
- ArenaBitVector *tryBlockAddr = oatAllocBitVector(cUnit.get(),
- cUnit->insnsSize,
- true /* expandable */);
- cUnit->tryBlockAddr = tryBlockAddr;
+ if (cUnit->printMe) {
+ oatDumpCompilationUnit(cUnit.get());
+ }
- /* Create the default entry and exit blocks and enter them to the list */
- BasicBlock *entryBlock = oatNewBB(cUnit.get(), kEntryBlock, numBlocks++);
- BasicBlock *exitBlock = oatNewBB(cUnit.get(), kExitBlock, numBlocks++);
+ if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
+ /* Verify if all blocks are connected as claimed */
+ oatDataFlowAnalysisDispatcher(cUnit.get(), verifyPredInfo, kAllNodes,
+ false /* isIterative */);
+ }
- cUnit->entryBlock = entryBlock;
- cUnit->exitBlock = exitBlock;
+ /* Perform SSA transformation for the whole method */
+ oatMethodSSATransformation(cUnit.get());
- oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) entryBlock);
- oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) exitBlock);
+ /* Detect loops */
+ oatMethodLoopDetection(cUnit.get());
- /* Current block to record parsed instructions */
- BasicBlock *curBlock = oatNewBB(cUnit.get(), kDalvikByteCode, numBlocks++);
- curBlock->startOffset = 0;
- oatInsertGrowableList(cUnit.get(), &cUnit->blockList, (intptr_t) curBlock);
- /* Add first block to the fast lookup cache */
- cUnit->blockMap.Put(curBlock->startOffset, curBlock);
- entryBlock->fallThrough = curBlock;
- oatInsertGrowableList(cUnit.get(), curBlock->predecessors,
- (intptr_t)entryBlock);
+ /* Count uses */
+ oatMethodUseCount(cUnit.get());
+ /* Perform null check elimination */
+ oatMethodNullCheckElimination(cUnit.get());
+
+ /* Do some basic block optimizations */
+ oatMethodBasicBlockOptimization(cUnit.get());
+
+ oatInitializeRegAlloc(cUnit.get()); // Needs to happen after SSA naming
+
+ /* Allocate Registers using simple local allocation scheme */
+ oatSimpleRegAlloc(cUnit.get());
+
+ if (specialCase != kNoHandler) {
/*
- * Store back the number of blocks since new blocks may be created of
- * accessing cUnit.
+ * Custom codegen for special cases. If for any reason the
+ * special codegen doesn't succeed, cUnit->firstLIRInsn will
+ * be set to NULL.
*/
- cUnit->numBlocks = numBlocks;
+ oatSpecialMIR2LIR(cUnit.get(), specialCase);
+ }
- /* Identify code range in try blocks and set up the empty catch blocks */
- processTryCatchBlocks(cUnit.get());
+ /* Convert MIR to LIR, etc. */
+ if (cUnit->firstLIRInsn == NULL) {
+ oatMethodMIR2LIR(cUnit.get());
+ }
- /* Set up for simple method detection */
- int numPatterns = sizeof(specialPatterns)/sizeof(specialPatterns[0]);
- bool livePattern = (numPatterns > 0) && !(cUnit->disableOpt & (1 << kMatch));
- bool* deadPattern = (bool*)oatNew(cUnit.get(), sizeof(bool) * numPatterns,
- kAllocMisc);
- SpecialCaseHandler specialCase = kNoHandler;
- int patternPos = 0;
+ // Debugging only
+ if (cUnit->enableDebug & (1 << kDebugDumpCFG)) {
+ oatDumpCFG(cUnit.get(), "/sdcard/cfg/");
+ }
- /* Parse all instructions and put them into containing basic blocks */
- while (codePtr < codeEnd) {
- MIR *insn = (MIR *) oatNew(cUnit.get(), sizeof(MIR), true, kAllocMIR);
- insn->offset = curOffset;
- int width = parseInsn(cUnit.get(), codePtr, &insn->dalvikInsn, false);
- insn->width = width;
- Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (cUnit->opcodeCount != NULL) {
- cUnit->opcodeCount[static_cast<int>(opcode)]++;
- }
+ /* Method is not empty */
+ if (cUnit->firstLIRInsn) {
- /* Terminate when the data section is seen */
- if (width == 0)
- break;
+ // mark the targets of switch statement case labels
+ oatProcessSwitchTables(cUnit.get());
- /* Possible simple method? */
- if (livePattern) {
- livePattern = false;
- specialCase = kNoHandler;
- for (int i = 0; i < numPatterns; i++) {
- if (!deadPattern[i]) {
- if (specialPatterns[i].opcodes[patternPos] == opcode) {
- livePattern = true;
- specialCase = specialPatterns[i].handlerCode;
- } else {
- deadPattern[i] = true;
- }
- }
- }
- patternPos++;
- }
-
- oatAppendMIR(curBlock, insn);
-
- codePtr += width;
- int flags = Instruction::Flags(insn->dalvikInsn.opcode);
-
- int dfFlags = oatDataFlowAttributes[insn->dalvikInsn.opcode];
-
- if (dfFlags & DF_HAS_DEFS) {
- cUnit->defCount += (dfFlags & DF_DA_WIDE) ? 2 : 1;
- }
-
- if (flags & Instruction::kBranch) {
- curBlock = processCanBranch(cUnit.get(), curBlock, insn, curOffset,
- width, flags, codePtr, codeEnd);
- } else if (flags & Instruction::kReturn) {
- curBlock->fallThrough = exitBlock;
- oatInsertGrowableList(cUnit.get(), exitBlock->predecessors,
- (intptr_t)curBlock);
- /*
- * Terminate the current block if there are instructions
- * afterwards.
- */
- if (codePtr < codeEnd) {
- /*
- * Create a fallthrough block for real instructions
- * (incl. NOP).
- */
- if (contentIsInsn(codePtr)) {
- findBlock(cUnit.get(), curOffset + width,
- /* split */
- false,
- /* create */
- true,
- /* immedPredBlockP */
- NULL);
- }
- }
- } else if (flags & Instruction::kThrow) {
- processCanThrow(cUnit.get(), curBlock, insn, curOffset, width, flags,
- tryBlockAddr, codePtr, codeEnd);
- } else if (flags & Instruction::kSwitch) {
- processCanSwitch(cUnit.get(), curBlock, insn, curOffset, width, flags);
- }
- curOffset += width;
- BasicBlock *nextBlock = findBlock(cUnit.get(), curOffset,
- /* split */
- false,
- /* create */
- false,
- /* immedPredBlockP */
- NULL);
- if (nextBlock) {
- /*
- * The next instruction could be the target of a previously parsed
- * forward branch so a block is already created. If the current
- * instruction is not an unconditional branch, connect them through
- * the fall-through link.
- */
- DCHECK(curBlock->fallThrough == NULL ||
- curBlock->fallThrough == nextBlock ||
- curBlock->fallThrough == exitBlock);
-
- if ((curBlock->fallThrough == NULL) && (flags & Instruction::kContinue)) {
- curBlock->fallThrough = nextBlock;
- oatInsertGrowableList(cUnit.get(), nextBlock->predecessors,
- (intptr_t)curBlock);
- }
- curBlock = nextBlock;
- }
- }
-
- if (!(cUnit->disableOpt & (1 << kSkipLargeMethodOptimization))) {
- if ((cUnit->numBlocks > MANY_BLOCKS) ||
- ((cUnit->numBlocks > MANY_BLOCKS_INITIALIZER) &&
- PrettyMethod(method_idx, dex_file, false).find("init>") !=
- std::string::npos)) {
- cUnit->qdMode = true;
- }
- }
-
- if (cUnit->qdMode) {
- cUnit->disableDataflow = true;
- // Disable optimization which require dataflow/ssa
- cUnit->disableOpt |=
- (1 << kNullCheckElimination) |
- (1 << kBBOpt) |
- (1 << kPromoteRegs);
- if (cUnit->printMe) {
- LOG(INFO) << "QD mode enabled: "
- << PrettyMethod(method_idx, dex_file)
- << " too big: " << cUnit->numBlocks;
- }
- }
+ /* Convert LIR into machine code. */
+ oatAssembleLIR(cUnit.get());
if (cUnit->printMe) {
- oatDumpCompilationUnit(cUnit.get());
+ oatCodegenDump(cUnit.get());
}
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
- /* Verify if all blocks are connected as claimed */
- oatDataFlowAnalysisDispatcher(cUnit.get(), verifyPredInfo, kAllNodes,
- false /* isIterative */);
- }
-
- /* Perform SSA transformation for the whole method */
- oatMethodSSATransformation(cUnit.get());
-
- /* Detect loops */
- oatMethodLoopDetection(cUnit.get());
-
- /* Count uses */
- oatMethodUseCount(cUnit.get());
-
- /* Perform null check elimination */
- oatMethodNullCheckElimination(cUnit.get());
-
- /* Do some basic block optimizations */
- oatMethodBasicBlockOptimization(cUnit.get());
-
- oatInitializeRegAlloc(cUnit.get()); // Needs to happen after SSA naming
-
- /* Allocate Registers using simple local allocation scheme */
- oatSimpleRegAlloc(cUnit.get());
-
- if (specialCase != kNoHandler) {
- /*
- * Custom codegen for special cases. If for any reason the
- * special codegen doesn't success, cUnit->firstLIRInsn will
- * set to NULL;
- */
- oatSpecialMIR2LIR(cUnit.get(), specialCase);
- }
-
- /* Convert MIR to LIR, etc. */
- if (cUnit->firstLIRInsn == NULL) {
- oatMethodMIR2LIR(cUnit.get());
- }
-
- // Debugging only
- if (cUnit->enableDebug & (1 << kDebugDumpCFG)) {
- oatDumpCFG(cUnit.get(), "/sdcard/cfg/");
- }
-
- /* Method is not empty */
- if (cUnit->firstLIRInsn) {
-
- // mark the targets of switch statement case labels
- oatProcessSwitchTables(cUnit.get());
-
- /* Convert LIR into machine code. */
- oatAssembleLIR(cUnit.get());
-
- if (cUnit->printMe) {
- oatCodegenDump(cUnit.get());
+ if (cUnit->opcodeCount != NULL) {
+ LOG(INFO) << "Opcode Count";
+ for (int i = 0; i < kNumPackedOpcodes; i++) {
+ if (cUnit->opcodeCount[i] != 0) {
+ LOG(INFO) << "-C- "
+ << Instruction::Name(static_cast<Instruction::Code>(i))
+ << " " << cUnit->opcodeCount[i];
}
+ }
+ }
+ }
- if (cUnit->opcodeCount != NULL) {
- LOG(INFO) << "Opcode Count";
- for (int i = 0; i < kNumPackedOpcodes; i++) {
- if (cUnit->opcodeCount[i] != 0) {
- LOG(INFO) << "-C- "
- <<Instruction::Name(static_cast<Instruction::Code>(i))
- << " " << cUnit->opcodeCount[i];
- }
- }
- }
- }
+ // Combine vmap tables - core regs, then fp regs - into vmapTable
+ std::vector<uint16_t> vmapTable;
+ for (size_t i = 0 ; i < cUnit->coreVmapTable.size(); i++) {
+ vmapTable.push_back(cUnit->coreVmapTable[i]);
+ }
+ // If we have a frame, push a marker to take place of lr
+ if (cUnit->frameSize > 0) {
+ vmapTable.push_back(INVALID_VREG);
+ } else {
+ DCHECK_EQ(__builtin_popcount(cUnit->coreSpillMask), 0);
+ DCHECK_EQ(__builtin_popcount(cUnit->fpSpillMask), 0);
+ }
+ // Combine vmap tables - core regs, then fp regs
+ for (uint32_t i = 0; i < cUnit->fpVmapTable.size(); i++) {
+ vmapTable.push_back(cUnit->fpVmapTable[i]);
+ }
+ CompiledMethod* result =
+ new CompiledMethod(cUnit->instructionSet, cUnit->codeBuffer,
+ cUnit->frameSize, cUnit->coreSpillMask,
+ cUnit->fpSpillMask, cUnit->mappingTable, vmapTable);
- // Combine vmap tables - core regs, then fp regs - into vmapTable
- std::vector<uint16_t> vmapTable;
- for (size_t i = 0 ; i < cUnit->coreVmapTable.size(); i++) {
- vmapTable.push_back(cUnit->coreVmapTable[i]);
- }
- // If we have a frame, push a marker to take place of lr
- if (cUnit->frameSize > 0) {
- vmapTable.push_back(INVALID_VREG);
- } else {
- DCHECK_EQ(__builtin_popcount(cUnit->coreSpillMask), 0);
- DCHECK_EQ(__builtin_popcount(cUnit->fpSpillMask), 0);
- }
- // Combine vmap tables - core regs, then fp regs
- for (uint32_t i = 0; i < cUnit->fpVmapTable.size(); i++) {
- vmapTable.push_back(cUnit->fpVmapTable[i]);
- }
- CompiledMethod* result = new CompiledMethod(cUnit->instructionSet, cUnit->codeBuffer,
- cUnit->frameSize, cUnit->coreSpillMask,
- cUnit->fpSpillMask, cUnit->mappingTable,
- vmapTable);
-
- VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file)
- << " (" << (cUnit->codeBuffer.size() * sizeof(cUnit->codeBuffer[0]))
- << " bytes)";
+ VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file)
+ << " (" << (cUnit->codeBuffer.size() * sizeof(cUnit->codeBuffer[0]))
+ << " bytes)";
#ifdef WITH_MEMSTATS
- if (cUnit->enableDebug & (1 << kDebugShowMemoryUsage)) {
- oatDumpMemStats(cUnit.get());
- }
+ if (cUnit->enableDebug & (1 << kDebugShowMemoryUsage)) {
+ oatDumpMemStats(cUnit.get());
+ }
#endif
- oatArenaReset(cUnit.get());
+ oatArenaReset(cUnit.get());
- return result;
+ return result;
}
} // namespace art
-extern "C" art::CompiledMethod* ArtCompileMethod(art::Compiler& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, uint32_t method_idx,
- const art::ClassLoader* class_loader,
- const art::DexFile& dex_file)
+extern "C" art::CompiledMethod*
+ ArtCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::ClassLoader* class_loader,
+ const art::DexFile& dex_file)
{
CHECK_EQ(compiler.GetInstructionSet(), art::oatInstructionSet());
- return art::oatCompileMethod(compiler, code_item, access_flags, method_idx, class_loader, dex_file);
+ return art::oatCompileMethod(compiler, code_item, access_flags, method_idx,
+ class_loader, dex_file);
}
diff --git a/src/compiler/IntermediateRep.cc b/src/compiler/IntermediateRep.cc
index d1ba45c..237f5e0 100644
--- a/src/compiler/IntermediateRep.cc
+++ b/src/compiler/IntermediateRep.cc
@@ -20,40 +20,40 @@
namespace art {
static const char* gOpKindNames[kOpInvalid + 1] = {
- "OpMov",
- "OpMvn",
- "OpCmp",
- "OpLsl",
- "OpLsr",
- "OpAsr",
- "OpRor",
- "OpNot",
- "OpAnd",
- "OpOr",
- "OpXor",
- "OpNeg",
- "OpAdd",
- "OpAdc",
- "OpSub",
- "OpSbc",
- "OpRsub",
- "OpMul",
- "OpDiv",
- "OpRem",
- "OpBic",
- "OpCmn",
- "OpTst",
- "OpBkpt",
- "OpBlx",
- "OpPush",
- "OpPop",
- "Op2Char",
- "Op2Short",
- "Op2Byte",
- "OpCondBr",
- "OpUncondBr",
- "OpBx",
- "OpInvalid",
+ "OpMov",
+ "OpMvn",
+ "OpCmp",
+ "OpLsl",
+ "OpLsr",
+ "OpAsr",
+ "OpRor",
+ "OpNot",
+ "OpAnd",
+ "OpOr",
+ "OpXor",
+ "OpNeg",
+ "OpAdd",
+ "OpAdc",
+ "OpSub",
+ "OpSbc",
+ "OpRsub",
+ "OpMul",
+ "OpDiv",
+ "OpRem",
+ "OpBic",
+ "OpCmn",
+ "OpTst",
+ "OpBkpt",
+ "OpBlx",
+ "OpPush",
+ "OpPop",
+ "Op2Char",
+ "Op2Short",
+ "Op2Byte",
+ "OpCondBr",
+ "OpUncondBr",
+ "OpBx",
+ "OpInvalid",
};
std::ostream& operator<<(std::ostream& os, const OpKind& kind) {
@@ -68,62 +68,62 @@
/* Allocate a new basic block */
BasicBlock* oatNewBB(CompilationUnit* cUnit, BBType blockType, int blockId)
{
- BasicBlock* bb = (BasicBlock* )oatNew(cUnit, sizeof(BasicBlock), true,
- kAllocBB);
- bb->blockType = blockType;
- bb->id = blockId;
- bb->predecessors = (GrowableList*) oatNew(cUnit, sizeof(GrowableList),
- false, kAllocPredecessors);
- oatInitGrowableList(cUnit, bb->predecessors,
- (blockType == kExitBlock) ? 2048 : 2,
- kListPredecessors);
- return bb;
+ BasicBlock* bb = (BasicBlock* )oatNew(cUnit, sizeof(BasicBlock), true,
+ kAllocBB);
+ bb->blockType = blockType;
+ bb->id = blockId;
+ bb->predecessors = (GrowableList*) oatNew(cUnit, sizeof(GrowableList),
+ false, kAllocPredecessors);
+ oatInitGrowableList(cUnit, bb->predecessors,
+ (blockType == kExitBlock) ? 2048 : 2,
+ kListPredecessors);
+ return bb;
}
/* Insert an MIR instruction to the end of a basic block */
void oatAppendMIR(BasicBlock* bb, MIR* mir)
{
- if (bb->firstMIRInsn == NULL) {
- DCHECK(bb->lastMIRInsn == NULL);
- bb->lastMIRInsn = bb->firstMIRInsn = mir;
- mir->prev = mir->next = NULL;
- } else {
- bb->lastMIRInsn->next = mir;
- mir->prev = bb->lastMIRInsn;
- mir->next = NULL;
- bb->lastMIRInsn = mir;
- }
+ if (bb->firstMIRInsn == NULL) {
+ DCHECK(bb->lastMIRInsn == NULL);
+ bb->lastMIRInsn = bb->firstMIRInsn = mir;
+ mir->prev = mir->next = NULL;
+ } else {
+ bb->lastMIRInsn->next = mir;
+ mir->prev = bb->lastMIRInsn;
+ mir->next = NULL;
+ bb->lastMIRInsn = mir;
+ }
}
/* Insert an MIR instruction to the head of a basic block */
void oatPrependMIR(BasicBlock* bb, MIR* mir)
{
- if (bb->firstMIRInsn == NULL) {
- DCHECK(bb->lastMIRInsn == NULL);
- bb->lastMIRInsn = bb->firstMIRInsn = mir;
- mir->prev = mir->next = NULL;
- } else {
- bb->firstMIRInsn->prev = mir;
- mir->next = bb->firstMIRInsn;
- mir->prev = NULL;
- bb->firstMIRInsn = mir;
- }
+ if (bb->firstMIRInsn == NULL) {
+ DCHECK(bb->lastMIRInsn == NULL);
+ bb->lastMIRInsn = bb->firstMIRInsn = mir;
+ mir->prev = mir->next = NULL;
+ } else {
+ bb->firstMIRInsn->prev = mir;
+ mir->next = bb->firstMIRInsn;
+ mir->prev = NULL;
+ bb->firstMIRInsn = mir;
+ }
}
/* Insert a MIR instruction after the specified MIR */
void oatInsertMIRAfter(BasicBlock* bb, MIR* currentMIR, MIR* newMIR)
{
- newMIR->prev = currentMIR;
- newMIR->next = currentMIR->next;
- currentMIR->next = newMIR;
+ newMIR->prev = currentMIR;
+ newMIR->next = currentMIR->next;
+ currentMIR->next = newMIR;
- if (newMIR->next) {
- /* Is not the last MIR in the block */
- newMIR->next->prev = newMIR;
- } else {
- /* Is the last MIR in the block */
- bb->lastMIRInsn = newMIR;
- }
+ if (newMIR->next) {
+ /* Is not the last MIR in the block */
+ newMIR->next->prev = newMIR;
+ } else {
+ /* Is the last MIR in the block */
+ bb->lastMIRInsn = newMIR;
+ }
}
/*
@@ -132,16 +132,16 @@
*/
void oatAppendLIR(CompilationUnit *cUnit, LIR* lir)
{
- if (cUnit->firstLIRInsn == NULL) {
- DCHECK(cUnit->lastLIRInsn == NULL);
- cUnit->lastLIRInsn = cUnit->firstLIRInsn = lir;
- lir->prev = lir->next = NULL;
- } else {
- cUnit->lastLIRInsn->next = lir;
- lir->prev = cUnit->lastLIRInsn;
- lir->next = NULL;
- cUnit->lastLIRInsn = lir;
- }
+ if (cUnit->firstLIRInsn == NULL) {
+ DCHECK(cUnit->lastLIRInsn == NULL);
+ cUnit->lastLIRInsn = cUnit->firstLIRInsn = lir;
+ lir->prev = lir->next = NULL;
+ } else {
+ cUnit->lastLIRInsn->next = lir;
+ lir->prev = cUnit->lastLIRInsn;
+ lir->next = NULL;
+ cUnit->lastLIRInsn = lir;
+ }
}
/*
@@ -152,13 +152,13 @@
*/
void oatInsertLIRBefore(LIR* currentLIR, LIR* newLIR)
{
- DCHECK(currentLIR->prev != NULL);
- LIR *prevLIR = currentLIR->prev;
+ DCHECK(currentLIR->prev != NULL);
+ LIR *prevLIR = currentLIR->prev;
- prevLIR->next = newLIR;
- newLIR->prev = prevLIR;
- newLIR->next = currentLIR;
- currentLIR->prev = newLIR;
+ prevLIR->next = newLIR;
+ newLIR->prev = prevLIR;
+ newLIR->next = currentLIR;
+ currentLIR->prev = newLIR;
}
/*
@@ -169,10 +169,10 @@
*/
void oatInsertLIRAfter(LIR* currentLIR, LIR* newLIR)
{
- newLIR->prev = currentLIR;
- newLIR->next = currentLIR->next;
- currentLIR->next = newLIR;
- newLIR->next->prev = newLIR;
+ newLIR->prev = currentLIR;
+ newLIR->next = currentLIR->next;
+ currentLIR->next = newLIR;
+ newLIR->next->prev = newLIR;
}
} // namespace art
diff --git a/src/compiler/Ralloc.cc b/src/compiler/Ralloc.cc
index 25acdd5..54728db 100644
--- a/src/compiler/Ralloc.cc
+++ b/src/compiler/Ralloc.cc
@@ -22,63 +22,63 @@
namespace art {
bool setFp(CompilationUnit* cUnit, int index, bool isFP) {
- bool change = false;
- if (cUnit->regLocation[index].highWord) {
- return change;
- }
- if (isFP && !cUnit->regLocation[index].fp) {
- cUnit->regLocation[index].fp = true;
- cUnit->regLocation[index].defined = true;
- change = true;
- }
+ bool change = false;
+ if (cUnit->regLocation[index].highWord) {
return change;
+ }
+ if (isFP && !cUnit->regLocation[index].fp) {
+ cUnit->regLocation[index].fp = true;
+ cUnit->regLocation[index].defined = true;
+ change = true;
+ }
+ return change;
}
bool setCore(CompilationUnit* cUnit, int index, bool isCore) {
- bool change = false;
- if (cUnit->regLocation[index].highWord) {
- return change;
- }
- if (isCore && !cUnit->regLocation[index].defined) {
- cUnit->regLocation[index].core = true;
- cUnit->regLocation[index].defined = true;
- change = true;
- }
+ bool change = false;
+ if (cUnit->regLocation[index].highWord) {
return change;
+ }
+ if (isCore && !cUnit->regLocation[index].defined) {
+ cUnit->regLocation[index].core = true;
+ cUnit->regLocation[index].defined = true;
+ change = true;
+ }
+ return change;
}
bool remapNames(CompilationUnit* cUnit, BasicBlock* bb)
{
- if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock &&
- bb->blockType != kExitBlock)
- return false;
-
- for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
- SSARepresentation *ssaRep = mir->ssaRep;
- if (ssaRep) {
- for (int i = 0; i < ssaRep->numUses; i++) {
- ssaRep->uses[i] = cUnit->phiAliasMap[ssaRep->uses[i]];
- }
- for (int i = 0; i < ssaRep->numDefs; i++) {
- ssaRep->defs[i] = cUnit->phiAliasMap[ssaRep->defs[i]];
- }
- }
- }
+ if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock &&
+ bb->blockType != kExitBlock)
return false;
+
+ for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ SSARepresentation *ssaRep = mir->ssaRep;
+ if (ssaRep) {
+ for (int i = 0; i < ssaRep->numUses; i++) {
+ ssaRep->uses[i] = cUnit->phiAliasMap[ssaRep->uses[i]];
+ }
+ for (int i = 0; i < ssaRep->numDefs; i++) {
+ ssaRep->defs[i] = cUnit->phiAliasMap[ssaRep->defs[i]];
+ }
+ }
+ }
+ return false;
}
// Try to find the next move result which might have an FP target
SSARepresentation* findMoveResult(MIR* mir)
{
- SSARepresentation* res = NULL;
- for (; mir; mir = mir->next) {
- if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
- (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
- res = mir->ssaRep;
- break;
- }
+ SSARepresentation* res = NULL;
+ for (; mir; mir = mir->next) {
+ if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
+ (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
+ res = mir->ssaRep;
+ break;
}
- return res;
+ }
+ return res;
}
/*
@@ -88,213 +88,212 @@
*/
bool inferTypeAndSize(CompilationUnit* cUnit, BasicBlock* bb)
{
- MIR *mir;
- bool changed = false; // Did anything change?
+ MIR *mir;
+ bool changed = false; // Did anything change?
- if (bb->dataFlowInfo == NULL) return false;
- if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock)
- return false;
+ if (bb->dataFlowInfo == NULL) return false;
+ if (bb->blockType != kDalvikByteCode && bb->blockType != kEntryBlock)
+ return false;
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- SSARepresentation *ssaRep = mir->ssaRep;
- if (ssaRep) {
- int attrs = oatDataFlowAttributes[mir->dalvikInsn.opcode];
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ SSARepresentation *ssaRep = mir->ssaRep;
+ if (ssaRep) {
+ int attrs = oatDataFlowAttributes[mir->dalvikInsn.opcode];
- // Handle defs
- if (attrs & (DF_DA | DF_DA_WIDE)) {
- if (attrs & DF_CORE_A) {
- changed |= setCore(cUnit, ssaRep->defs[0], true);
- }
- if (attrs & DF_DA_WIDE) {
- cUnit->regLocation[ssaRep->defs[0]].wide = true;
- cUnit->regLocation[ssaRep->defs[1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->defs[0])+1,
- SRegToVReg(cUnit, ssaRep->defs[1]));
- }
- }
-
- // Handles uses
- int next = 0;
- if (attrs & (DF_UA | DF_UA_WIDE)) {
- if (attrs & DF_CORE_A) {
- changed |= setCore(cUnit, ssaRep->uses[next], true);
- }
- if (attrs & DF_UA_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
- next += 2;
- } else {
- next++;
- }
- }
- if (attrs & (DF_UB | DF_UB_WIDE)) {
- if (attrs & DF_CORE_B) {
- changed |= setCore(cUnit, ssaRep->uses[next], true);
- }
- if (attrs & DF_UB_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
- next += 2;
- } else {
- next++;
- }
- }
- if (attrs & (DF_UC | DF_UC_WIDE)) {
- if (attrs & DF_CORE_C) {
- changed |= setCore(cUnit, ssaRep->uses[next], true);
- }
- if (attrs & DF_UC_WIDE) {
- cUnit->regLocation[ssaRep->uses[next]].wide = true;
- cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
- SRegToVReg(cUnit, ssaRep->uses[next + 1]));
- }
- }
-
- // Special-case handling for format 35c/3rc invokes
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes) ? 0 : Instruction::Flags(mir->dalvikInsn.opcode);
- if ((flags & Instruction::kInvoke) && (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
- DCHECK_EQ(next, 0);
- int target_idx = mir->dalvikInsn.vB;
- const char* shorty =
- oatGetShortyFromTargetIdx(cUnit, target_idx);
- // Handle result type if floating point
- if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- // Find move-result that consumes this result
- SSARepresentation* tgtRep = findMoveResult(mir->next);
- // Might be in next basic block
- if (!tgtRep) {
- tgtRep = findMoveResult(bb->fallThrough->firstMIRInsn);
- }
- // Result might not be used at all, so no move-result
- if (tgtRep) {
- tgtRep->fpDef[0] = true;
- changed |= setFp(cUnit, tgtRep->defs[0], true);
- if (shorty[0] == 'D') {
- tgtRep->fpDef[1] = true;
- changed |= setFp(cUnit, tgtRep->defs[1], true);
- }
- }
- }
- int numUses = mir->dalvikInsn.vA;
- // If this is a non-static invoke, skip implicit "this"
- if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
- (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
- cUnit->regLocation[ssaRep->uses[next]].defined = true;
- cUnit->regLocation[ssaRep->uses[next]].core = true;
- next++;
- }
- uint32_t cpos = 1;
- if (strlen(shorty) > 1) {
- for (int i = next; i < numUses;) {
- DCHECK_LT(cpos, strlen(shorty));
- switch (shorty[cpos++]) {
- case 'D':
- ssaRep->fpUse[i] = true;
- ssaRep->fpUse[i+1] = true;
- cUnit->regLocation[ssaRep->uses[i]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].highWord
- = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
- SRegToVReg(cUnit, ssaRep->uses[i+1]));
- i++;
- break;
- case 'J':
- cUnit->regLocation[ssaRep->uses[i]].wide = true;
- cUnit->regLocation[ssaRep->uses[i+1]].highWord
- = true;
- DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
- SRegToVReg(cUnit, ssaRep->uses[i+1]));
- changed |= setCore(cUnit, ssaRep->uses[i],true);
- i++;
- break;
- case 'F':
- ssaRep->fpUse[i] = true;
- break;
- default:
- changed |= setCore(cUnit,ssaRep->uses[i], true);
- break;
- }
- i++;
- }
- }
- }
-
- for (int i=0; ssaRep->fpUse && i< ssaRep->numUses; i++) {
- if (ssaRep->fpUse[i])
- changed |= setFp(cUnit, ssaRep->uses[i], true);
- }
- for (int i=0; ssaRep->fpDef && i< ssaRep->numDefs; i++) {
- if (ssaRep->fpDef[i])
- changed |= setFp(cUnit, ssaRep->defs[i], true);
- }
- // Special-case handling for moves & Phi
- if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
- // If any of our inputs or outputs is defined, set all
- bool definedFP = false;
- bool definedCore = false;
- definedFP |= (cUnit->regLocation[ssaRep->defs[0]].defined &&
- cUnit->regLocation[ssaRep->defs[0]].fp);
- definedCore |= (cUnit->regLocation[ssaRep->defs[0]].defined &&
- cUnit->regLocation[ssaRep->defs[0]].core);
- for (int i = 0; i < ssaRep->numUses; i++) {
- definedFP |= (cUnit->regLocation[ssaRep->uses[i]].defined &&
- cUnit->regLocation[ssaRep->uses[i]].fp);
- definedCore |= (cUnit->regLocation[ssaRep->uses[i]].defined
- && cUnit->regLocation[ssaRep->uses[i]].core);
- }
- /*
- * TODO: cleaner fix
- * We don't normally expect to see a Dalvik register
- * definition used both as a floating point and core
- * value. However, the instruction rewriting that occurs
- * during verification can eliminate some type information,
- * leaving us confused. The real fix here is either to
- * add explicit type information to Dalvik byte codes,
- * or to recognize THROW_VERIFICATION_ERROR as
- * an unconditional branch and support dead code elimination.
- * As a workaround we can detect this situation and
- * disable register promotion (which is the only thing that
- * relies on distinctions between core and fp usages.
- */
- if ((definedFP && definedCore) &&
- ((cUnit->disableOpt & (1 << kPromoteRegs)) == 0)) {
- LOG(WARNING) << PrettyMethod(cUnit->method_idx, *cUnit->dex_file)
- << " op at block " << bb->id
- << " has both fp and core uses for same def.";
- cUnit->disableOpt |= (1 << kPromoteRegs);
- }
- changed |= setFp(cUnit, ssaRep->defs[0], definedFP);
- changed |= setCore(cUnit, ssaRep->defs[0], definedCore);
- for (int i = 0; i < ssaRep->numUses; i++) {
- changed |= setFp(cUnit, ssaRep->uses[i], definedFP);
- changed |= setCore(cUnit, ssaRep->uses[i], definedCore);
- }
- }
+ // Handle defs
+ if (attrs & (DF_DA | DF_DA_WIDE)) {
+ if (attrs & DF_CORE_A) {
+ changed |= setCore(cUnit, ssaRep->defs[0], true);
}
+ if (attrs & DF_DA_WIDE) {
+ cUnit->regLocation[ssaRep->defs[0]].wide = true;
+ cUnit->regLocation[ssaRep->defs[1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->defs[0])+1,
+ SRegToVReg(cUnit, ssaRep->defs[1]));
+ }
+ }
+
+ // Handle uses
+ int next = 0;
+ if (attrs & (DF_UA | DF_UA_WIDE)) {
+ if (attrs & DF_CORE_A) {
+ changed |= setCore(cUnit, ssaRep->uses[next], true);
+ }
+ if (attrs & DF_UA_WIDE) {
+ cUnit->regLocation[ssaRep->uses[next]].wide = true;
+ cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
+ SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ next += 2;
+ } else {
+ next++;
+ }
+ }
+ if (attrs & (DF_UB | DF_UB_WIDE)) {
+ if (attrs & DF_CORE_B) {
+ changed |= setCore(cUnit, ssaRep->uses[next], true);
+ }
+ if (attrs & DF_UB_WIDE) {
+ cUnit->regLocation[ssaRep->uses[next]].wide = true;
+ cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
+ SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ next += 2;
+ } else {
+ next++;
+ }
+ }
+ if (attrs & (DF_UC | DF_UC_WIDE)) {
+ if (attrs & DF_CORE_C) {
+ changed |= setCore(cUnit, ssaRep->uses[next], true);
+ }
+ if (attrs & DF_UC_WIDE) {
+ cUnit->regLocation[ssaRep->uses[next]].wide = true;
+ cUnit->regLocation[ssaRep->uses[next + 1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[next])+1,
+ SRegToVReg(cUnit, ssaRep->uses[next + 1]));
+ }
+ }
+
+ // Special-case handling for format 35c/3rc invokes
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
+ ? 0 : Instruction::Flags(mir->dalvikInsn.opcode);
+ if ((flags & Instruction::kInvoke) &&
+ (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
+ DCHECK_EQ(next, 0);
+ int target_idx = mir->dalvikInsn.vB;
+ const char* shorty = oatGetShortyFromTargetIdx(cUnit, target_idx);
+ // Handle result type if floating point
+ if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
+ // Find move-result that consumes this result
+ SSARepresentation* tgtRep = findMoveResult(mir->next);
+ // Might be in next basic block
+ if (!tgtRep) {
+ tgtRep = findMoveResult(bb->fallThrough->firstMIRInsn);
+ }
+ // Result might not be used at all, so no move-result
+ if (tgtRep) {
+ tgtRep->fpDef[0] = true;
+ changed |= setFp(cUnit, tgtRep->defs[0], true);
+ if (shorty[0] == 'D') {
+ tgtRep->fpDef[1] = true;
+ changed |= setFp(cUnit, tgtRep->defs[1], true);
+ }
+ }
+ }
+ int numUses = mir->dalvikInsn.vA;
+ // If this is a non-static invoke, skip implicit "this"
+ if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
+ (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
+ cUnit->regLocation[ssaRep->uses[next]].defined = true;
+ cUnit->regLocation[ssaRep->uses[next]].core = true;
+ next++;
+ }
+ uint32_t cpos = 1;
+ if (strlen(shorty) > 1) {
+ for (int i = next; i < numUses;) {
+ DCHECK_LT(cpos, strlen(shorty));
+ switch (shorty[cpos++]) {
+ case 'D':
+ ssaRep->fpUse[i] = true;
+ ssaRep->fpUse[i+1] = true;
+ cUnit->regLocation[ssaRep->uses[i]].wide = true;
+ cUnit->regLocation[ssaRep->uses[i+1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
+ SRegToVReg(cUnit, ssaRep->uses[i+1]));
+ i++;
+ break;
+ case 'J':
+ cUnit->regLocation[ssaRep->uses[i]].wide = true;
+ cUnit->regLocation[ssaRep->uses[i+1]].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, ssaRep->uses[i])+1,
+ SRegToVReg(cUnit, ssaRep->uses[i+1]));
+ changed |= setCore(cUnit, ssaRep->uses[i],true);
+ i++;
+ break;
+ case 'F':
+ ssaRep->fpUse[i] = true;
+ break;
+ default:
+ changed |= setCore(cUnit,ssaRep->uses[i], true);
+ break;
+ }
+ i++;
+ }
+ }
+ }
+
+ for (int i=0; ssaRep->fpUse && i< ssaRep->numUses; i++) {
+ if (ssaRep->fpUse[i])
+ changed |= setFp(cUnit, ssaRep->uses[i], true);
+ }
+ for (int i=0; ssaRep->fpDef && i< ssaRep->numDefs; i++) {
+ if (ssaRep->fpDef[i])
+ changed |= setFp(cUnit, ssaRep->defs[i], true);
+ }
+ // Special-case handling for moves & Phi
+ if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
+ // If any of our inputs or outputs is defined, set all
+ bool definedFP = false;
+ bool definedCore = false;
+ definedFP |= (cUnit->regLocation[ssaRep->defs[0]].defined &&
+ cUnit->regLocation[ssaRep->defs[0]].fp);
+ definedCore |= (cUnit->regLocation[ssaRep->defs[0]].defined &&
+ cUnit->regLocation[ssaRep->defs[0]].core);
+ for (int i = 0; i < ssaRep->numUses; i++) {
+ definedFP |= (cUnit->regLocation[ssaRep->uses[i]].defined &&
+ cUnit->regLocation[ssaRep->uses[i]].fp);
+ definedCore |= (cUnit->regLocation[ssaRep->uses[i]].defined
+ && cUnit->regLocation[ssaRep->uses[i]].core);
+ }
+ /*
+ * TODO: cleaner fix
+ * We don't normally expect to see a Dalvik register
+ * definition used both as a floating point and core
+ * value. However, the instruction rewriting that occurs
+ * during verification can eliminate some type information,
+ * leaving us confused. The real fix here is either to
+ * add explicit type information to Dalvik byte codes,
+ * or to recognize THROW_VERIFICATION_ERROR as
+ * an unconditional branch and support dead code elimination.
+ * As a workaround we can detect this situation and
+ * disable register promotion (which is the only thing that
+ * relies on distinctions between core and fp usages.
+ */
+ if ((definedFP && definedCore) &&
+ ((cUnit->disableOpt & (1 << kPromoteRegs)) == 0)) {
+ LOG(WARNING) << PrettyMethod(cUnit->method_idx, *cUnit->dex_file)
+ << " op at block " << bb->id
+ << " has both fp and core uses for same def.";
+ cUnit->disableOpt |= (1 << kPromoteRegs);
+ }
+ changed |= setFp(cUnit, ssaRep->defs[0], definedFP);
+ changed |= setCore(cUnit, ssaRep->defs[0], definedCore);
+ for (int i = 0; i < ssaRep->numUses; i++) {
+ changed |= setFp(cUnit, ssaRep->uses[i], definedFP);
+ changed |= setCore(cUnit, ssaRep->uses[i], definedCore);
+ }
+ }
}
- return changed;
+ }
+ return changed;
}
static const char* storageName[] = {" Frame ", "PhysReg", " Spill "};
void oatDumpRegLocTable(RegLocation* table, int count)
{
- for (int i = 0; i < count; i++) {
- LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c%d %c%d S%d",
- i, storageName[table[i].location], table[i].wide ? 'W' : 'N',
- table[i].defined ? 'D' : 'U', table[i].fp ? 'F' : 'C',
- table[i].highWord ? 'H' : 'L', table[i].home ? 'h' : 't',
- oatIsFpReg(table[i].lowReg) ? 's' : 'r',
- table[i].lowReg & oatFpRegMask(),
- oatIsFpReg(table[i].highReg) ? 's' : 'r',
- table[i].highReg & oatFpRegMask(), table[i].sRegLow);
- }
+ for (int i = 0; i < count; i++) {
+ LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c%d %c%d S%d",
+ i, storageName[table[i].location], table[i].wide ? 'W' : 'N',
+ table[i].defined ? 'D' : 'U', table[i].fp ? 'F' : 'C',
+ table[i].highWord ? 'H' : 'L', table[i].home ? 'h' : 't',
+ oatIsFpReg(table[i].lowReg) ? 's' : 'r',
+ table[i].lowReg & oatFpRegMask(),
+ oatIsFpReg(table[i].highReg) ? 's' : 'r',
+ table[i].highReg & oatFpRegMask(), table[i].sRegLow);
+ }
}
static const RegLocation freshLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0,
@@ -308,121 +307,119 @@
*/
void oatSimpleRegAlloc(CompilationUnit* cUnit)
{
- int i;
- RegLocation* loc;
+ int i;
+ RegLocation* loc;
- /* Allocate the location map */
- loc = (RegLocation*)oatNew(cUnit, cUnit->numSSARegs * sizeof(*loc), true,
- kAllocRegAlloc);
- for (i=0; i< cUnit->numSSARegs; i++) {
- loc[i] = freshLoc;
- loc[i].sRegLow = i;
+ /* Allocate the location map */
+ loc = (RegLocation*)oatNew(cUnit, cUnit->numSSARegs * sizeof(*loc), true,
+ kAllocRegAlloc);
+ for (i=0; i< cUnit->numSSARegs; i++) {
+ loc[i] = freshLoc;
+ loc[i].sRegLow = i;
+ }
+
+ /* Patch up the locations for Method* and the compiler temps */
+ loc[cUnit->methodSReg].location = kLocCompilerTemp;
+ loc[cUnit->methodSReg].defined = true;
+ for (i = 0; i < cUnit->numCompilerTemps; i++) {
+ CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
+ loc[ct->sReg].location = kLocCompilerTemp;
+ loc[ct->sReg].defined = true;
+ }
+
+ cUnit->regLocation = loc;
+
+ /* Allocate the promotion map */
+ int numRegs = cUnit->numDalvikRegisters;
+ cUnit->promotionMap =
+ (PromotionMap*)oatNew(cUnit, (numRegs + cUnit->numCompilerTemps + 1) *
+ sizeof(cUnit->promotionMap[0]), true,
+ kAllocRegAlloc);
+
+ /* Add types of incoming arguments based on signature */
+ int numIns = cUnit->numIns;
+ if (numIns > 0) {
+ int sReg = numRegs - numIns;
+ if ((cUnit->access_flags & kAccStatic) == 0) {
+ // For non-static, skip past "this"
+ cUnit->regLocation[sReg].defined = true;
+ cUnit->regLocation[sReg].core = true;
+ sReg++;
}
-
- /* Patch up the locations for Method* and the compiler temps */
- loc[cUnit->methodSReg].location = kLocCompilerTemp;
- loc[cUnit->methodSReg].defined = true;
- for (i = 0; i < cUnit->numCompilerTemps; i++) {
- CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
- loc[ct->sReg].location = kLocCompilerTemp;
- loc[ct->sReg].defined = true;
- }
-
- cUnit->regLocation = loc;
-
- /* Allocation the promotion map */
- int numRegs = cUnit->numDalvikRegisters;
- cUnit->promotionMap =
- (PromotionMap*)oatNew(cUnit, (numRegs + cUnit->numCompilerTemps + 1) *
- sizeof(cUnit->promotionMap[0]), true,
- kAllocRegAlloc);
-
- /* Add types of incoming arguments based on signature */
- int numIns = cUnit->numIns;
- if (numIns > 0) {
- int sReg = numRegs - numIns;
- if ((cUnit->access_flags & kAccStatic) == 0) {
- // For non-static, skip past "this"
+ const char* shorty = cUnit->shorty;
+ int shorty_len = strlen(shorty);
+ for (int i = 1; i < shorty_len; i++) {
+ switch (shorty[i]) {
+ case 'D':
+ cUnit->regLocation[sReg].wide = true;
+ cUnit->regLocation[sReg+1].highWord = true;
+ cUnit->regLocation[sReg+1].fp = true;
+ DCHECK_EQ(SRegToVReg(cUnit, sReg)+1, SRegToVReg(cUnit, sReg+1));
+ cUnit->regLocation[sReg].fp = true;
+ cUnit->regLocation[sReg].defined = true;
+ sReg++;
+ break;
+ case 'J':
+ cUnit->regLocation[sReg].wide = true;
+ cUnit->regLocation[sReg+1].highWord = true;
+ DCHECK_EQ(SRegToVReg(cUnit, sReg)+1, SRegToVReg(cUnit, sReg+1));
+ cUnit->regLocation[sReg].core = true;
+ cUnit->regLocation[sReg].defined = true;
+ sReg++;
+ break;
+ case 'F':
+ cUnit->regLocation[sReg].fp = true;
cUnit->regLocation[sReg].defined = true;
+ break;
+ default:
cUnit->regLocation[sReg].core = true;
- sReg++;
+ cUnit->regLocation[sReg].defined = true;
+ break;
}
- const char* shorty = cUnit->shorty;
- int shorty_len = strlen(shorty);
- for (int i = 1; i < shorty_len; i++) {
- switch (shorty[i]) {
- case 'D':
- cUnit->regLocation[sReg].wide = true;
- cUnit->regLocation[sReg+1].highWord = true;
- cUnit->regLocation[sReg+1].fp = true;
- DCHECK_EQ(SRegToVReg(cUnit, sReg)+1,
- SRegToVReg(cUnit, sReg+1));
- cUnit->regLocation[sReg].fp = true;
- cUnit->regLocation[sReg].defined = true;
- sReg++;
- break;
- case 'J':
- cUnit->regLocation[sReg].wide = true;
- cUnit->regLocation[sReg+1].highWord = true;
- DCHECK_EQ(SRegToVReg(cUnit, sReg)+1,
- SRegToVReg(cUnit, sReg+1));
- cUnit->regLocation[sReg].core = true;
- cUnit->regLocation[sReg].defined = true;
- sReg++;
- break;
- case 'F':
- cUnit->regLocation[sReg].fp = true;
- cUnit->regLocation[sReg].defined = true;
- break;
- default:
- cUnit->regLocation[sReg].core = true;
- cUnit->regLocation[sReg].defined = true;
- break;
- }
- sReg++;
- }
+ sReg++;
+ }
+ }
+
+ /* Remap names */
+ oatDataFlowAnalysisDispatcher(cUnit, remapNames,
+ kPreOrderDFSTraversal,
+ false /* isIterative */);
+
+ /* Do type & size inference pass */
+ oatDataFlowAnalysisDispatcher(cUnit, inferTypeAndSize,
+ kPreOrderDFSTraversal,
+ true /* isIterative */);
+
+ /*
+ * Set the sRegLow field to refer to the pre-SSA name of the
+ * base Dalvik virtual register. Once we add a better register
+ * allocator, remove this remapping.
+ */
+ for (i=0; i < cUnit->numSSARegs; i++) {
+ if (cUnit->regLocation[i].location != kLocCompilerTemp) {
+ cUnit->regLocation[i].sRegLow = SRegToVReg(cUnit, loc[i].sRegLow);
}
+ }
- /* Remap names */
- oatDataFlowAnalysisDispatcher(cUnit, remapNames,
- kPreOrderDFSTraversal,
- false /* isIterative */);
+ cUnit->coreSpillMask = 0;
+ cUnit->fpSpillMask = 0;
+ cUnit->numCoreSpills = 0;
- /* Do type & size inference pass */
- oatDataFlowAnalysisDispatcher(cUnit, inferTypeAndSize,
- kPreOrderDFSTraversal,
- true /* isIterative */);
+ oatDoPromotion(cUnit);
- /*
- * Set the sRegLow field to refer to the pre-SSA name of the
- * base Dalvik virtual register. Once we add a better register
- * allocator, remove this remapping.
- */
- for (i=0; i < cUnit->numSSARegs; i++) {
- if (cUnit->regLocation[i].location != kLocCompilerTemp) {
- cUnit->regLocation[i].sRegLow = SRegToVReg(cUnit, loc[i].sRegLow);
- }
- }
+ if (cUnit->printMe && !(cUnit->disableOpt & (1 << kPromoteRegs))) {
+ LOG(INFO) << "After Promotion";
+ oatDumpRegLocTable(cUnit->regLocation, cUnit->numSSARegs);
+ }
- cUnit->coreSpillMask = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numCoreSpills = 0;
-
- oatDoPromotion(cUnit);
-
- if (cUnit->printMe && !(cUnit->disableOpt & (1 << kPromoteRegs))) {
- LOG(INFO) << "After Promotion";
- oatDumpRegLocTable(cUnit->regLocation, cUnit->numSSARegs);
- }
-
- /* Figure out the frame size */
- static const uint32_t kAlignMask = kStackAlignment - 1;
- uint32_t size = (cUnit->numCoreSpills + cUnit->numFPSpills +
- 1 /* filler word */ + cUnit->numRegs + cUnit->numOuts +
- cUnit->numCompilerTemps + 1 /* curMethod* */)
- * sizeof(uint32_t);
- /* Align and set */
- cUnit->frameSize = (size + kAlignMask) & ~(kAlignMask);
+ /* Figure out the frame size */
+ static const uint32_t kAlignMask = kStackAlignment - 1;
+ uint32_t size = (cUnit->numCoreSpills + cUnit->numFPSpills +
+ 1 /* filler word */ + cUnit->numRegs + cUnit->numOuts +
+ cUnit->numCompilerTemps + 1 /* curMethod* */)
+ * sizeof(uint32_t);
+ /* Align and set */
+ cUnit->frameSize = (size + kAlignMask) & ~(kAlignMask);
}
} // namespace art
diff --git a/src/compiler/SSATransformation.cc b/src/compiler/SSATransformation.cc
index c5a6b8f..ada9351 100644
--- a/src/compiler/SSATransformation.cc
+++ b/src/compiler/SSATransformation.cc
@@ -23,74 +23,73 @@
void recordDFSOrders(CompilationUnit* cUnit, BasicBlock* block)
{
- if (block->visited || block->hidden) return;
- block->visited = true;
+ if (block->visited || block->hidden) return;
+ block->visited = true;
- // Can this block be reached only via previous block fallthrough?
- if ((block->blockType == kDalvikByteCode) &&
- (block->predecessors->numUsed == 1)) {
- DCHECK_GE(cUnit->dfsOrder.numUsed, 1U);
- int prevIdx = cUnit->dfsOrder.numUsed - 1;
- int prevId = cUnit->dfsOrder.elemList[prevIdx];
- BasicBlock* predBB = (BasicBlock*)block->predecessors->elemList[0];
- if (predBB->id == prevId) {
- block->fallThroughTarget = true;
- }
+ // Can this block be reached only via previous block fallthrough?
+ if ((block->blockType == kDalvikByteCode) &&
+ (block->predecessors->numUsed == 1)) {
+ DCHECK_GE(cUnit->dfsOrder.numUsed, 1U);
+ int prevIdx = cUnit->dfsOrder.numUsed - 1;
+ int prevId = cUnit->dfsOrder.elemList[prevIdx];
+ BasicBlock* predBB = (BasicBlock*)block->predecessors->elemList[0];
+ if (predBB->id == prevId) {
+ block->fallThroughTarget = true;
}
+ }
- /* Enqueue the preOrder block id */
- oatInsertGrowableList(cUnit, &cUnit->dfsOrder, block->id);
+ /* Enqueue the preOrder block id */
+ oatInsertGrowableList(cUnit, &cUnit->dfsOrder, block->id);
- if (block->fallThrough) {
- recordDFSOrders(cUnit, block->fallThrough);
+ if (block->fallThrough) {
+ recordDFSOrders(cUnit, block->fallThrough);
+ }
+ if (block->taken) recordDFSOrders(cUnit, block->taken);
+ if (block->successorBlockList.blockListType != kNotUsed) {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&block->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock* succBB = successorBlockInfo->block;
+ recordDFSOrders(cUnit, succBB);
}
- if (block->taken) recordDFSOrders(cUnit, block->taken);
- if (block->successorBlockList.blockListType != kNotUsed) {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&block->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- recordDFSOrders(cUnit, succBB);
- }
- }
+ }
- /* Record postorder in basic block and enqueue normal id in dfsPostOrder */
- block->dfsId = cUnit->dfsPostOrder.numUsed;
- oatInsertGrowableList(cUnit, &cUnit->dfsPostOrder, block->id);
- return;
+ /* Record postorder in basic block and enqueue normal id in dfsPostOrder */
+ block->dfsId = cUnit->dfsPostOrder.numUsed;
+ oatInsertGrowableList(cUnit, &cUnit->dfsPostOrder, block->id);
+ return;
}
/* Sort the blocks by the Depth-First-Search */
void computeDFSOrders(CompilationUnit* cUnit)
{
- /* Initialize or reset the DFS preOrder list */
- if (cUnit->dfsOrder.elemList == NULL) {
- oatInitGrowableList(cUnit, &cUnit->dfsOrder, cUnit->numBlocks,
- kListDfsOrder);
- } else {
- /* Just reset the used length on the counter */
- cUnit->dfsOrder.numUsed = 0;
- }
+ /* Initialize or reset the DFS preOrder list */
+ if (cUnit->dfsOrder.elemList == NULL) {
+ oatInitGrowableList(cUnit, &cUnit->dfsOrder, cUnit->numBlocks,
+ kListDfsOrder);
+ } else {
+ /* Just reset the used length on the counter */
+ cUnit->dfsOrder.numUsed = 0;
+ }
- /* Initialize or reset the DFS postOrder list */
- if (cUnit->dfsPostOrder.elemList == NULL) {
- oatInitGrowableList(cUnit, &cUnit->dfsPostOrder, cUnit->numBlocks,
- kListDfsPostOrder);
- } else {
- /* Just reset the used length on the counter */
- cUnit->dfsPostOrder.numUsed = 0;
- }
+ /* Initialize or reset the DFS postOrder list */
+ if (cUnit->dfsPostOrder.elemList == NULL) {
+ oatInitGrowableList(cUnit, &cUnit->dfsPostOrder, cUnit->numBlocks,
+ kListDfsPostOrder);
+ } else {
+ /* Just reset the used length on the counter */
+ cUnit->dfsPostOrder.numUsed = 0;
+ }
- oatDataFlowAnalysisDispatcher(cUnit, oatClearVisitedFlag,
- kAllNodes,
- false /* isIterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, oatClearVisitedFlag,
+ kAllNodes, false /* isIterative */);
- recordDFSOrders(cUnit, cUnit->entryBlock);
- cUnit->numReachableBlocks = cUnit->dfsOrder.numUsed;
+ recordDFSOrders(cUnit, cUnit->entryBlock);
+ cUnit->numReachableBlocks = cUnit->dfsOrder.numUsed;
}
/*
@@ -99,165 +98,163 @@
*/
bool fillDefBlockMatrix(CompilationUnit* cUnit, BasicBlock* bb)
{
- if (bb->dataFlowInfo == NULL) return false;
+ if (bb->dataFlowInfo == NULL) return false;
- ArenaBitVectorIterator iterator;
+ ArenaBitVectorIterator iterator;
- oatBitVectorIteratorInit(bb->dataFlowInfo->defV, &iterator);
- while (true) {
- int idx = oatBitVectorIteratorNext(&iterator);
- if (idx == -1) break;
- /* Block bb defines register idx */
- oatSetBit(cUnit, cUnit->defBlockMatrix[idx], bb->id);
- }
- return true;
+ oatBitVectorIteratorInit(bb->dataFlowInfo->defV, &iterator);
+ while (true) {
+ int idx = oatBitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ /* Block bb defines register idx */
+ oatSetBit(cUnit, cUnit->defBlockMatrix[idx], bb->id);
+ }
+ return true;
}
void computeDefBlockMatrix(CompilationUnit* cUnit)
{
- int numRegisters = cUnit->numDalvikRegisters;
- /* Allocate numDalvikRegisters bit vector pointers */
- cUnit->defBlockMatrix = (ArenaBitVector **)
- oatNew(cUnit, sizeof(ArenaBitVector *) * numRegisters, true,
- kAllocDFInfo);
- int i;
+ int numRegisters = cUnit->numDalvikRegisters;
+ /* Allocate numDalvikRegisters bit vector pointers */
+ cUnit->defBlockMatrix = (ArenaBitVector **)
+ oatNew(cUnit, sizeof(ArenaBitVector *) * numRegisters, true,
+ kAllocDFInfo);
+ int i;
- /* Initialize numRegister vectors with numBlocks bits each */
- for (i = 0; i < numRegisters; i++) {
- cUnit->defBlockMatrix[i] = oatAllocBitVector(cUnit, cUnit->numBlocks,
- false, kBitMapBMatrix);
- }
- oatDataFlowAnalysisDispatcher(cUnit, oatFindLocalLiveIn,
- kAllNodes,
- false /* isIterative */);
- oatDataFlowAnalysisDispatcher(cUnit, fillDefBlockMatrix,
- kAllNodes,
- false /* isIterative */);
+ /* Initialize numRegister vectors with numBlocks bits each */
+ for (i = 0; i < numRegisters; i++) {
+ cUnit->defBlockMatrix[i] = oatAllocBitVector(cUnit, cUnit->numBlocks,
+ false, kBitMapBMatrix);
+ }
+ oatDataFlowAnalysisDispatcher(cUnit, oatFindLocalLiveIn,
+ kAllNodes, false /* isIterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, fillDefBlockMatrix,
+ kAllNodes, false /* isIterative */);
- /*
- * Also set the incoming parameters as defs in the entry block.
- * Only need to handle the parameters for the outer method.
- */
- int numRegs = cUnit->numDalvikRegisters;
- int inReg = numRegs - cUnit->numIns;
- for (; inReg < numRegs; inReg++) {
- oatSetBit(cUnit, cUnit->defBlockMatrix[inReg], cUnit->entryBlock->id);
- }
+ /*
+ * Also set the incoming parameters as defs in the entry block.
+ * Only need to handle the parameters for the outer method.
+ */
+ int numRegs = cUnit->numDalvikRegisters;
+ int inReg = numRegs - cUnit->numIns;
+ for (; inReg < numRegs; inReg++) {
+ oatSetBit(cUnit, cUnit->defBlockMatrix[inReg], cUnit->entryBlock->id);
+ }
}
/* Compute the post-order traversal of the CFG */
void computeDomPostOrderTraversal(CompilationUnit* cUnit, BasicBlock* bb)
{
- ArenaBitVectorIterator bvIterator;
- oatBitVectorIteratorInit(bb->iDominated, &bvIterator);
- GrowableList* blockList = &cUnit->blockList;
+ ArenaBitVectorIterator bvIterator;
+ oatBitVectorIteratorInit(bb->iDominated, &bvIterator);
+ GrowableList* blockList = &cUnit->blockList;
- /* Iterate through the dominated blocks first */
- while (true) {
- //TUNING: hot call to oatBitVectorIteratorNext
- int bbIdx = oatBitVectorIteratorNext(&bvIterator);
- if (bbIdx == -1) break;
- BasicBlock* dominatedBB =
- (BasicBlock* ) oatGrowableListGetElement(blockList, bbIdx);
- computeDomPostOrderTraversal(cUnit, dominatedBB);
- }
+ /* Iterate through the dominated blocks first */
+ while (true) {
+ //TUNING: hot call to oatBitVectorIteratorNext
+ int bbIdx = oatBitVectorIteratorNext(&bvIterator);
+ if (bbIdx == -1) break;
+ BasicBlock* dominatedBB =
+ (BasicBlock* ) oatGrowableListGetElement(blockList, bbIdx);
+ computeDomPostOrderTraversal(cUnit, dominatedBB);
+ }
- /* Enter the current block id */
- oatInsertGrowableList(cUnit, &cUnit->domPostOrderTraversal, bb->id);
+ /* Enter the current block id */
+ oatInsertGrowableList(cUnit, &cUnit->domPostOrderTraversal, bb->id);
- /* hacky loop detection */
- if (bb->taken && oatIsBitSet(bb->dominators, bb->taken->id)) {
- cUnit->hasLoop = true;
- }
+ /* hacky loop detection */
+ if (bb->taken && oatIsBitSet(bb->dominators, bb->taken->id)) {
+ cUnit->hasLoop = true;
+ }
}
void checkForDominanceFrontier(CompilationUnit* cUnit, BasicBlock* domBB,
- const BasicBlock* succBB)
+ const BasicBlock* succBB)
{
- /*
- * TODO - evaluate whether phi will ever need to be inserted into exit
- * blocks.
- */
- if (succBB->iDom != domBB &&
- succBB->blockType == kDalvikByteCode &&
- succBB->hidden == false) {
- oatSetBit(cUnit, domBB->domFrontier, succBB->id);
- }
+ /*
+ * TODO - evaluate whether phi will ever need to be inserted into exit
+ * blocks.
+ */
+ if (succBB->iDom != domBB &&
+ succBB->blockType == kDalvikByteCode &&
+ succBB->hidden == false) {
+ oatSetBit(cUnit, domBB->domFrontier, succBB->id);
+ }
}
/* Worker function to compute the dominance frontier */
bool computeDominanceFrontier(CompilationUnit* cUnit, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
+ GrowableList* blockList = &cUnit->blockList;
- /* Calculate DF_local */
- if (bb->taken) {
- checkForDominanceFrontier(cUnit, bb, bb->taken);
- }
- if (bb->fallThrough) {
- checkForDominanceFrontier(cUnit, bb, bb->fallThrough);
- }
- if (bb->successorBlockList.blockListType != kNotUsed) {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- checkForDominanceFrontier(cUnit, bb, succBB);
- }
- }
+ /* Calculate DF_local */
+ if (bb->taken) {
+ checkForDominanceFrontier(cUnit, bb, bb->taken);
+ }
+ if (bb->fallThrough) {
+ checkForDominanceFrontier(cUnit, bb, bb->fallThrough);
+ }
+ if (bb->successorBlockList.blockListType != kNotUsed) {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock* succBB = successorBlockInfo->block;
+ checkForDominanceFrontier(cUnit, bb, succBB);
+ }
+ }
- /* Calculate DF_up */
- ArenaBitVectorIterator bvIterator;
- oatBitVectorIteratorInit(bb->iDominated, &bvIterator);
+ /* Calculate DF_up */
+ ArenaBitVectorIterator bvIterator;
+ oatBitVectorIteratorInit(bb->iDominated, &bvIterator);
+ while (true) {
+ //TUNING: hot call to oatBitVectorIteratorNext
+ int dominatedIdx = oatBitVectorIteratorNext(&bvIterator);
+ if (dominatedIdx == -1) break;
+ BasicBlock* dominatedBB = (BasicBlock* )
+ oatGrowableListGetElement(blockList, dominatedIdx);
+ ArenaBitVectorIterator dfIterator;
+ oatBitVectorIteratorInit(dominatedBB->domFrontier, &dfIterator);
while (true) {
- //TUNING: hot call to oatBitVectorIteratorNext
- int dominatedIdx = oatBitVectorIteratorNext(&bvIterator);
- if (dominatedIdx == -1) break;
- BasicBlock* dominatedBB = (BasicBlock* )
- oatGrowableListGetElement(blockList, dominatedIdx);
- ArenaBitVectorIterator dfIterator;
- oatBitVectorIteratorInit(dominatedBB->domFrontier, &dfIterator);
- while (true) {
- //TUNING: hot call to oatBitVectorIteratorNext
- int dfUpIdx = oatBitVectorIteratorNext(&dfIterator);
- if (dfUpIdx == -1) break;
- BasicBlock* dfUpBlock = (BasicBlock* )
- oatGrowableListGetElement(blockList, dfUpIdx);
- checkForDominanceFrontier(cUnit, bb, dfUpBlock);
- }
+ //TUNING: hot call to oatBitVectorIteratorNext
+ int dfUpIdx = oatBitVectorIteratorNext(&dfIterator);
+ if (dfUpIdx == -1) break;
+ BasicBlock* dfUpBlock = (BasicBlock* )
+ oatGrowableListGetElement(blockList, dfUpIdx);
+ checkForDominanceFrontier(cUnit, bb, dfUpBlock);
}
+ }
- return true;
+ return true;
}
/* Worker function for initializing domination-related data structures */
bool initializeDominationInfo(CompilationUnit* cUnit, BasicBlock* bb)
{
- int numTotalBlocks = cUnit->blockList.numUsed;
+ int numTotalBlocks = cUnit->blockList.numUsed;
- if (bb->dominators == NULL ) {
- bb->dominators = oatAllocBitVector(cUnit, numTotalBlocks,
- false /* expandable */,
- kBitMapDominators);
- bb->iDominated = oatAllocBitVector(cUnit, numTotalBlocks,
- false /* expandable */,
- kBitMapIDominated);
- bb->domFrontier = oatAllocBitVector(cUnit, numTotalBlocks,
- false /* expandable */,
- kBitMapDomFrontier);
- } else {
- oatClearAllBits(bb->dominators);
- oatClearAllBits(bb->iDominated);
- oatClearAllBits(bb->domFrontier);
- }
- /* Set all bits in the dominator vector */
- oatSetInitialBits(bb->dominators, numTotalBlocks);
+ if (bb->dominators == NULL ) {
+ bb->dominators = oatAllocBitVector(cUnit, numTotalBlocks,
+ false /* expandable */,
+ kBitMapDominators);
+ bb->iDominated = oatAllocBitVector(cUnit, numTotalBlocks,
+ false /* expandable */,
+ kBitMapIDominated);
+ bb->domFrontier = oatAllocBitVector(cUnit, numTotalBlocks,
+ false /* expandable */,
+ kBitMapDomFrontier);
+ } else {
+ oatClearAllBits(bb->dominators);
+ oatClearAllBits(bb->iDominated);
+ oatClearAllBits(bb->domFrontier);
+ }
+ /* Set all bits in the dominator vector */
+ oatSetInitialBits(bb->dominators, numTotalBlocks);
- return true;
+ return true;
}
/*
@@ -267,35 +264,35 @@
*/
bool slowComputeBlockDominators(CompilationUnit* cUnit, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
- int numTotalBlocks = blockList->numUsed;
- ArenaBitVector* tempBlockV = cUnit->tempBlockV;
- GrowableListIterator iter;
+ GrowableList* blockList = &cUnit->blockList;
+ int numTotalBlocks = blockList->numUsed;
+ ArenaBitVector* tempBlockV = cUnit->tempBlockV;
+ GrowableListIterator iter;
- /*
- * The dominator of the entry block has been preset to itself and we need
- * to skip the calculation here.
- */
- if (bb == cUnit->entryBlock) return false;
+ /*
+ * The dominator of the entry block has been preset to itself and we need
+ * to skip the calculation here.
+ */
+ if (bb == cUnit->entryBlock) return false;
- oatSetInitialBits(tempBlockV, numTotalBlocks);
+ oatSetInitialBits(tempBlockV, numTotalBlocks);
- /* Iterate through the predecessors */
- oatGrowableListIteratorInit(bb->predecessors, &iter);
- while (true) {
- BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- if (!predBB) break;
- /* tempBlockV = tempBlockV ^ dominators */
- if (predBB->dominators != NULL) {
- oatIntersectBitVectors(tempBlockV, tempBlockV, predBB->dominators);
- }
+ /* Iterate through the predecessors */
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ if (!predBB) break;
+ /* tempBlockV = tempBlockV ^ dominators */
+ if (predBB->dominators != NULL) {
+ oatIntersectBitVectors(tempBlockV, tempBlockV, predBB->dominators);
}
- oatSetBit(cUnit, tempBlockV, bb->id);
- if (oatCompareBitVectors(tempBlockV, bb->dominators)) {
- oatCopyBitVector(bb->dominators, tempBlockV);
- return true;
- }
- return false;
+ }
+ oatSetBit(cUnit, tempBlockV, bb->id);
+ if (oatCompareBitVectors(tempBlockV, bb->dominators)) {
+ oatCopyBitVector(bb->dominators, tempBlockV);
+ return true;
+ }
+ return false;
}
/*
@@ -305,44 +302,45 @@
*/
bool slowComputeBlockIDom(CompilationUnit* cUnit, BasicBlock* bb)
{
- GrowableList* blockList = &cUnit->blockList;
- ArenaBitVector* tempBlockV = cUnit->tempBlockV;
- ArenaBitVectorIterator bvIterator;
- BasicBlock* iDom;
+ GrowableList* blockList = &cUnit->blockList;
+ ArenaBitVector* tempBlockV = cUnit->tempBlockV;
+ ArenaBitVectorIterator bvIterator;
+ BasicBlock* iDom;
- if (bb == cUnit->entryBlock) return false;
+ if (bb == cUnit->entryBlock) return false;
- oatCopyBitVector(tempBlockV, bb->dominators);
- oatClearBit(tempBlockV, bb->id);
- oatBitVectorIteratorInit(tempBlockV, &bvIterator);
+ oatCopyBitVector(tempBlockV, bb->dominators);
+ oatClearBit(tempBlockV, bb->id);
+ oatBitVectorIteratorInit(tempBlockV, &bvIterator);
- /* Should not see any dead block */
- DCHECK_NE(oatCountSetBits(tempBlockV), 0);
- if (oatCountSetBits(tempBlockV) == 1) {
- iDom = (BasicBlock* ) oatGrowableListGetElement(
- blockList, oatBitVectorIteratorNext(&bvIterator));
- bb->iDom = iDom;
- } else {
- int iDomIdx = oatBitVectorIteratorNext(&bvIterator);
- DCHECK_NE(iDomIdx, -1);
- while (true) {
- int nextDom = oatBitVectorIteratorNext(&bvIterator);
- if (nextDom == -1) break;
- BasicBlock* nextDomBB = (BasicBlock* )
- oatGrowableListGetElement(blockList, nextDom);
- /* iDom dominates nextDom - set new iDom */
- if (oatIsBitSet(nextDomBB->dominators, iDomIdx)) {
- iDomIdx = nextDom;
- }
+ /* Should not see any dead block */
+ DCHECK_NE(oatCountSetBits(tempBlockV), 0);
+ if (oatCountSetBits(tempBlockV) == 1) {
+ iDom = (BasicBlock* )
+ oatGrowableListGetElement(blockList,
+ oatBitVectorIteratorNext(&bvIterator));
+ bb->iDom = iDom;
+ } else {
+ int iDomIdx = oatBitVectorIteratorNext(&bvIterator);
+ DCHECK_NE(iDomIdx, -1);
+ while (true) {
+ int nextDom = oatBitVectorIteratorNext(&bvIterator);
+ if (nextDom == -1) break;
+ BasicBlock* nextDomBB = (BasicBlock* )
+ oatGrowableListGetElement(blockList, nextDom);
+ /* iDom dominates nextDom - set new iDom */
+ if (oatIsBitSet(nextDomBB->dominators, iDomIdx)) {
+ iDomIdx = nextDom;
+ }
- }
- iDom = (BasicBlock* ) oatGrowableListGetElement(blockList, iDomIdx);
- /* Set the immediate dominator block for bb */
- bb->iDom = iDom;
}
- /* Add bb to the iDominated set of the immediate dominator block */
- oatSetBit(cUnit, iDom->iDominated, bb->id);
- return true;
+ iDom = (BasicBlock* ) oatGrowableListGetElement(blockList, iDomIdx);
+ /* Set the immediate dominator block for bb */
+ bb->iDom = iDom;
+ }
+ /* Add bb to the iDominated set of the immediate dominator block */
+ oatSetBit(cUnit, iDom->iDominated, bb->id);
+ return true;
}
/*
@@ -352,175 +350,174 @@
*/
int findCommonParent(CompilationUnit *cUnit, int block1, int block2)
{
- while (block1 != block2) {
- while (block1 < block2) {
- block1 = cUnit->iDomList[block1];
- DCHECK_NE(block1, NOTVISITED);
- }
- while (block2 < block1) {
- block2 = cUnit->iDomList[block2];
- DCHECK_NE(block2, NOTVISITED);
- }
+ while (block1 != block2) {
+ while (block1 < block2) {
+ block1 = cUnit->iDomList[block1];
+ DCHECK_NE(block1, NOTVISITED);
}
- return block1;
+ while (block2 < block1) {
+ block2 = cUnit->iDomList[block2];
+ DCHECK_NE(block2, NOTVISITED);
+ }
+ }
+ return block1;
}
/* Worker function to compute each block's immediate dominator */
bool computeBlockIDom(CompilationUnit* cUnit, BasicBlock* bb)
{
- GrowableListIterator iter;
- int idom = -1;
+ GrowableListIterator iter;
+ int idom = -1;
- /* Special-case entry block */
- if (bb == cUnit->entryBlock) {
- return false;
- }
-
- /* Iterate through the predecessors */
- oatGrowableListIteratorInit(bb->predecessors, &iter);
-
- /* Find the first processed predecessor */
- while (true) {
- BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- CHECK(predBB != NULL);
- if (cUnit->iDomList[predBB->dfsId] != NOTVISITED) {
- idom = predBB->dfsId;
- break;
- }
- }
-
- /* Scan the rest of the predecessors */
- while (true) {
- BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
- if (!predBB) break;
- if (cUnit->iDomList[predBB->dfsId] == NOTVISITED) {
- continue;
- } else {
- idom = findCommonParent(cUnit, predBB->dfsId, idom);
- }
- }
-
- DCHECK_NE(idom, NOTVISITED);
-
- /* Did something change? */
- if (cUnit->iDomList[bb->dfsId] != idom) {
- cUnit->iDomList[bb->dfsId] = idom;
- return true;
- }
+ /* Special-case entry block */
+ if (bb == cUnit->entryBlock) {
return false;
+ }
+
+ /* Iterate through the predecessors */
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+
+ /* Find the first processed predecessor */
+ while (true) {
+ BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ CHECK(predBB != NULL);
+ if (cUnit->iDomList[predBB->dfsId] != NOTVISITED) {
+ idom = predBB->dfsId;
+ break;
+ }
+ }
+
+ /* Scan the rest of the predecessors */
+ while (true) {
+ BasicBlock* predBB = (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ if (!predBB) break;
+ if (cUnit->iDomList[predBB->dfsId] == NOTVISITED) {
+ continue;
+ } else {
+ idom = findCommonParent(cUnit, predBB->dfsId, idom);
+ }
+ }
+
+ DCHECK_NE(idom, NOTVISITED);
+
+ /* Did something change? */
+ if (cUnit->iDomList[bb->dfsId] != idom) {
+ cUnit->iDomList[bb->dfsId] = idom;
+ return true;
+ }
+ return false;
}
/* Worker function to compute each block's domintors */
bool computeBlockDominators(CompilationUnit* cUnit, BasicBlock* bb)
{
- if (bb == cUnit->entryBlock) {
- oatClearAllBits(bb->dominators);
- } else {
- oatCopyBitVector(bb->dominators, bb->iDom->dominators);
- }
- oatSetBit(cUnit, bb->dominators, bb->id);
- return false;
+ if (bb == cUnit->entryBlock) {
+ oatClearAllBits(bb->dominators);
+ } else {
+ oatCopyBitVector(bb->dominators, bb->iDom->dominators);
+ }
+ oatSetBit(cUnit, bb->dominators, bb->id);
+ return false;
}
bool setDominators(CompilationUnit* cUnit, BasicBlock* bb)
{
- if (bb != cUnit->entryBlock) {
- int iDomDFSIdx = cUnit->iDomList[bb->dfsId];
- DCHECK_NE(iDomDFSIdx, NOTVISITED);
- int iDomIdx = cUnit->dfsPostOrder.elemList[iDomDFSIdx];
- BasicBlock* iDom = (BasicBlock*)
- oatGrowableListGetElement(&cUnit->blockList, iDomIdx);
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
- DCHECK_EQ(bb->iDom->id, iDom->id);
- }
- bb->iDom = iDom;
- /* Add bb to the iDominated set of the immediate dominator block */
- oatSetBit(cUnit, iDom->iDominated, bb->id);
+ if (bb != cUnit->entryBlock) {
+ int iDomDFSIdx = cUnit->iDomList[bb->dfsId];
+ DCHECK_NE(iDomDFSIdx, NOTVISITED);
+ int iDomIdx = cUnit->dfsPostOrder.elemList[iDomDFSIdx];
+ BasicBlock* iDom = (BasicBlock*)
+ oatGrowableListGetElement(&cUnit->blockList, iDomIdx);
+ if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
+ DCHECK_EQ(bb->iDom->id, iDom->id);
}
- return false;
+ bb->iDom = iDom;
+ /* Add bb to the iDominated set of the immediate dominator block */
+ oatSetBit(cUnit, iDom->iDominated, bb->id);
+ }
+ return false;
}
/* Compute dominators, immediate dominator, and dominance fronter */
void computeDominators(CompilationUnit* cUnit)
{
- int numReachableBlocks = cUnit->numReachableBlocks;
- int numTotalBlocks = cUnit->blockList.numUsed;
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ int numTotalBlocks = cUnit->blockList.numUsed;
- /* Initialize domination-related data structures */
- oatDataFlowAnalysisDispatcher(cUnit, initializeDominationInfo,
- kReachableNodes,
- false /* isIterative */);
+ /* Initialize domination-related data structures */
+ oatDataFlowAnalysisDispatcher(cUnit, initializeDominationInfo,
+ kReachableNodes, false /* isIterative */);
- /* Initalize & Clear iDomList */
- if (cUnit->iDomList == NULL) {
- cUnit->iDomList = (int*)oatNew(cUnit, sizeof(int) * numReachableBlocks,
- false, kAllocDFInfo);
- }
- for (int i = 0; i < numReachableBlocks; i++) {
- cUnit->iDomList[i] = NOTVISITED;
- }
+ /* Initialize & Clear iDomList */
+ if (cUnit->iDomList == NULL) {
+ cUnit->iDomList = (int*)oatNew(cUnit, sizeof(int) * numReachableBlocks,
+ false, kAllocDFInfo);
+ }
+ for (int i = 0; i < numReachableBlocks; i++) {
+ cUnit->iDomList[i] = NOTVISITED;
+ }
- /* For post-order, last block is entry block. Set its iDom to istelf */
- DCHECK_EQ(cUnit->entryBlock->dfsId, numReachableBlocks-1);
- cUnit->iDomList[cUnit->entryBlock->dfsId] = cUnit->entryBlock->dfsId;
+ /* For post-order, last block is entry block. Set its iDom to itself */
+ DCHECK_EQ(cUnit->entryBlock->dfsId, numReachableBlocks-1);
+ cUnit->iDomList[cUnit->entryBlock->dfsId] = cUnit->entryBlock->dfsId;
- /* Compute the immediate dominators */
- oatDataFlowAnalysisDispatcher(cUnit, computeBlockIDom,
- kReversePostOrderTraversal,
+ /* Compute the immediate dominators */
+ oatDataFlowAnalysisDispatcher(cUnit, computeBlockIDom,
+ kReversePostOrderTraversal,
+ true /* isIterative */);
+
+ /* Set the dominator for the root node */
+ oatClearAllBits(cUnit->entryBlock->dominators);
+ oatSetBit(cUnit, cUnit->entryBlock->dominators, cUnit->entryBlock->id);
+
+ if (cUnit->tempBlockV == NULL) {
+ cUnit->tempBlockV = oatAllocBitVector(cUnit, numTotalBlocks,
+ false /* expandable */,
+ kBitMapTmpBlockV);
+ } else {
+ oatClearAllBits(cUnit->tempBlockV);
+ }
+ cUnit->entryBlock->iDom = NULL;
+
+ /* For testing, compute sets using alternate mechanism */
+ if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
+ // Use alternate mechanism to compute dominators for comparison
+ oatDataFlowAnalysisDispatcher(cUnit, slowComputeBlockDominators,
+ kPreOrderDFSTraversal,
true /* isIterative */);
- /* Set the dominator for the root node */
- oatClearAllBits(cUnit->entryBlock->dominators);
- oatSetBit(cUnit, cUnit->entryBlock->dominators, cUnit->entryBlock->id);
+ oatDataFlowAnalysisDispatcher(cUnit, slowComputeBlockIDom,
+ kReachableNodes,
+ false /* isIterative */);
+ }
- if (cUnit->tempBlockV == NULL) {
- cUnit->tempBlockV = oatAllocBitVector(cUnit, numTotalBlocks,
- false /* expandable */,
- kBitMapTmpBlockV);
- } else {
- oatClearAllBits(cUnit->tempBlockV);
- }
- cUnit->entryBlock->iDom = NULL;
+ oatDataFlowAnalysisDispatcher(cUnit, setDominators,
+ kReachableNodes,
+ false /* isIterative */);
- /* For testing, compute sets using alternate mechanism */
- if (cUnit->enableDebug & (1 << kDebugVerifyDataflow)) {
- // Use alternate mechanism to compute dominators for comparison
- oatDataFlowAnalysisDispatcher(cUnit, slowComputeBlockDominators,
- kPreOrderDFSTraversal,
- true /* isIterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, computeBlockDominators,
+ kReversePostOrderTraversal,
+ false /* isIterative */);
- oatDataFlowAnalysisDispatcher(cUnit, slowComputeBlockIDom,
- kReachableNodes,
- false /* isIterative */);
- }
+ /*
+ * Now go ahead and compute the post order traversal based on the
+ * iDominated sets.
+ */
+ if (cUnit->domPostOrderTraversal.elemList == NULL) {
+ oatInitGrowableList(cUnit, &cUnit->domPostOrderTraversal,
+ numReachableBlocks, kListDomPostOrderTraversal);
+ } else {
+ cUnit->domPostOrderTraversal.numUsed = 0;
+ }
- oatDataFlowAnalysisDispatcher(cUnit, setDominators,
- kReachableNodes,
- false /* isIterative */);
+ computeDomPostOrderTraversal(cUnit, cUnit->entryBlock);
+ DCHECK_EQ(cUnit->domPostOrderTraversal.numUsed,
+ (unsigned) cUnit->numReachableBlocks);
- oatDataFlowAnalysisDispatcher(cUnit, computeBlockDominators,
- kReversePostOrderTraversal,
- false /* isIterative */);
-
- /*
- * Now go ahead and compute the post order traversal based on the
- * iDominated sets.
- */
- if (cUnit->domPostOrderTraversal.elemList == NULL) {
- oatInitGrowableList(cUnit, &cUnit->domPostOrderTraversal,
- numReachableBlocks, kListDomPostOrderTraversal);
- } else {
- cUnit->domPostOrderTraversal.numUsed = 0;
- }
-
- computeDomPostOrderTraversal(cUnit, cUnit->entryBlock);
- DCHECK_EQ(cUnit->domPostOrderTraversal.numUsed,
- (unsigned) cUnit->numReachableBlocks);
-
- /* Now compute the dominance frontier for each block */
- oatDataFlowAnalysisDispatcher(cUnit, computeDominanceFrontier,
- kPostOrderDOMTraversal,
- false /* isIterative */);
+ /* Now compute the dominance frontier for each block */
+ oatDataFlowAnalysisDispatcher(cUnit, computeDominanceFrontier,
+ kPostOrderDOMTraversal,
+ false /* isIterative */);
}
/*
@@ -528,20 +525,20 @@
* This is probably not general enough to be placed in BitVector.[ch].
*/
void computeSuccLiveIn(ArenaBitVector* dest,
- const ArenaBitVector* src1,
- const ArenaBitVector* src2)
+ const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
{
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
- dest->expandable != src1->expandable ||
- dest->expandable != src2->expandable) {
- LOG(FATAL) << "Incompatible set properties";
- }
+ if (dest->storageSize != src1->storageSize ||
+ dest->storageSize != src2->storageSize ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable) {
+ LOG(FATAL) << "Incompatible set properties";
+ }
- unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
- dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
- }
+ unsigned int idx;
+ for (idx = 0; idx < dest->storageSize; idx++) {
+ dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
+ }
}
/*
@@ -551,121 +548,120 @@
*/
bool computeBlockLiveIns(CompilationUnit* cUnit, BasicBlock* bb)
{
- ArenaBitVector* tempDalvikRegisterV = cUnit->tempDalvikRegisterV;
+ ArenaBitVector* tempDalvikRegisterV = cUnit->tempDalvikRegisterV;
- if (bb->dataFlowInfo == NULL) return false;
- oatCopyBitVector(tempDalvikRegisterV, bb->dataFlowInfo->liveInV);
- if (bb->taken && bb->taken->dataFlowInfo)
- computeSuccLiveIn(tempDalvikRegisterV, bb->taken->dataFlowInfo->liveInV,
- bb->dataFlowInfo->defV);
- if (bb->fallThrough && bb->fallThrough->dataFlowInfo)
+ if (bb->dataFlowInfo == NULL) return false;
+ oatCopyBitVector(tempDalvikRegisterV, bb->dataFlowInfo->liveInV);
+ if (bb->taken && bb->taken->dataFlowInfo)
+ computeSuccLiveIn(tempDalvikRegisterV, bb->taken->dataFlowInfo->liveInV,
+ bb->dataFlowInfo->defV);
+ if (bb->fallThrough && bb->fallThrough->dataFlowInfo)
+ computeSuccLiveIn(tempDalvikRegisterV,
+ bb->fallThrough->dataFlowInfo->liveInV,
+ bb->dataFlowInfo->defV);
+ if (bb->successorBlockList.blockListType != kNotUsed) {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock* succBB = successorBlockInfo->block;
+ if (succBB->dataFlowInfo) {
computeSuccLiveIn(tempDalvikRegisterV,
- bb->fallThrough->dataFlowInfo->liveInV,
+ succBB->dataFlowInfo->liveInV,
bb->dataFlowInfo->defV);
- if (bb->successorBlockList.blockListType != kNotUsed) {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&bb->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- if (succBB->dataFlowInfo) {
- computeSuccLiveIn(tempDalvikRegisterV,
- succBB->dataFlowInfo->liveInV,
- bb->dataFlowInfo->defV);
- }
- }
+ }
}
- if (oatCompareBitVectors(tempDalvikRegisterV, bb->dataFlowInfo->liveInV)) {
- oatCopyBitVector(bb->dataFlowInfo->liveInV, tempDalvikRegisterV);
- return true;
- }
- return false;
+ }
+ if (oatCompareBitVectors(tempDalvikRegisterV, bb->dataFlowInfo->liveInV)) {
+ oatCopyBitVector(bb->dataFlowInfo->liveInV, tempDalvikRegisterV);
+ return true;
+ }
+ return false;
}
/* Insert phi nodes to for each variable to the dominance frontiers */
void insertPhiNodes(CompilationUnit* cUnit)
{
- int dalvikReg;
- const GrowableList* blockList = &cUnit->blockList;
- ArenaBitVector* phiBlocks =
- oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapPhi);
- ArenaBitVector* tmpBlocks =
- oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapTmpBlocks);
- ArenaBitVector* inputBlocks =
- oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapInputBlocks);
+ int dalvikReg;
+ const GrowableList* blockList = &cUnit->blockList;
+ ArenaBitVector* phiBlocks =
+ oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapPhi);
+ ArenaBitVector* tmpBlocks =
+ oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapTmpBlocks);
+ ArenaBitVector* inputBlocks =
+ oatAllocBitVector(cUnit, cUnit->numBlocks, false, kBitMapInputBlocks);
- cUnit->tempDalvikRegisterV =
- oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
- kBitMapRegisterV);
+ cUnit->tempDalvikRegisterV =
+ oatAllocBitVector(cUnit, cUnit->numDalvikRegisters, false,
+ kBitMapRegisterV);
- oatDataFlowAnalysisDispatcher(cUnit, computeBlockLiveIns,
- kPostOrderDFSTraversal,
- true /* isIterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, computeBlockLiveIns,
+ kPostOrderDFSTraversal, true /* isIterative */);
- /* Iterate through each Dalvik register */
- for (dalvikReg = 0; dalvikReg < cUnit->numDalvikRegisters; dalvikReg++) {
- bool change;
- ArenaBitVectorIterator iterator;
+ /* Iterate through each Dalvik register */
+ for (dalvikReg = 0; dalvikReg < cUnit->numDalvikRegisters; dalvikReg++) {
+ bool change;
+ ArenaBitVectorIterator iterator;
- oatCopyBitVector(inputBlocks, cUnit->defBlockMatrix[dalvikReg]);
- oatClearAllBits(phiBlocks);
+ oatCopyBitVector(inputBlocks, cUnit->defBlockMatrix[dalvikReg]);
+ oatClearAllBits(phiBlocks);
- /* Calculate the phi blocks for each Dalvik register */
- do {
- change = false;
- oatClearAllBits(tmpBlocks);
- oatBitVectorIteratorInit(inputBlocks, &iterator);
+ /* Calculate the phi blocks for each Dalvik register */
+ do {
+ change = false;
+ oatClearAllBits(tmpBlocks);
+ oatBitVectorIteratorInit(inputBlocks, &iterator);
- while (true) {
- int idx = oatBitVectorIteratorNext(&iterator);
- if (idx == -1) break;
- BasicBlock* defBB =
- (BasicBlock* ) oatGrowableListGetElement(blockList, idx);
+ while (true) {
+ int idx = oatBitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ BasicBlock* defBB =
+ (BasicBlock* ) oatGrowableListGetElement(blockList, idx);
- /* Merge the dominance frontier to tmpBlocks */
- //TUNING: hot call to oatUnifyBitVectors
- if (defBB->domFrontier != NULL) {
- oatUnifyBitVectors(tmpBlocks, tmpBlocks, defBB->domFrontier);
- }
- }
- if (oatCompareBitVectors(phiBlocks, tmpBlocks)) {
- change = true;
- oatCopyBitVector(phiBlocks, tmpBlocks);
-
- /*
- * Iterate through the original blocks plus the new ones in
- * the dominance frontier.
- */
- oatCopyBitVector(inputBlocks, phiBlocks);
- oatUnifyBitVectors(inputBlocks, inputBlocks,
- cUnit->defBlockMatrix[dalvikReg]);
- }
- } while (change);
-
- /*
- * Insert a phi node for dalvikReg in the phiBlocks if the Dalvik
- * register is in the live-in set.
- */
- oatBitVectorIteratorInit(phiBlocks, &iterator);
- while (true) {
- int idx = oatBitVectorIteratorNext(&iterator);
- if (idx == -1) break;
- BasicBlock* phiBB =
- (BasicBlock* ) oatGrowableListGetElement(blockList, idx);
- /* Variable will be clobbered before being used - no need for phi */
- if (!oatIsBitSet(phiBB->dataFlowInfo->liveInV, dalvikReg)) continue;
- MIR *phi = (MIR *) oatNew(cUnit, sizeof(MIR), true, kAllocDFInfo);
- phi->dalvikInsn.opcode = (Instruction::Code)kMirOpPhi;
- phi->dalvikInsn.vA = dalvikReg;
- phi->offset = phiBB->startOffset;
- phi->meta.phiNext = cUnit->phiList;
- cUnit->phiList = phi;
- oatPrependMIR(phiBB, phi);
+ /* Merge the dominance frontier to tmpBlocks */
+ //TUNING: hot call to oatUnifyBitVectors
+ if (defBB->domFrontier != NULL) {
+ oatUnifyBitVectors(tmpBlocks, tmpBlocks, defBB->domFrontier);
+ }
}
+ if (oatCompareBitVectors(phiBlocks, tmpBlocks)) {
+ change = true;
+ oatCopyBitVector(phiBlocks, tmpBlocks);
+
+ /*
+ * Iterate through the original blocks plus the new ones in
+ * the dominance frontier.
+ */
+ oatCopyBitVector(inputBlocks, phiBlocks);
+ oatUnifyBitVectors(inputBlocks, inputBlocks,
+ cUnit->defBlockMatrix[dalvikReg]);
+ }
+ } while (change);
+
+ /*
+ * Insert a phi node for dalvikReg in the phiBlocks if the Dalvik
+ * register is in the live-in set.
+ */
+ oatBitVectorIteratorInit(phiBlocks, &iterator);
+ while (true) {
+ int idx = oatBitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ BasicBlock* phiBB =
+ (BasicBlock* ) oatGrowableListGetElement(blockList, idx);
+ /* Variable will be clobbered before being used - no need for phi */
+ if (!oatIsBitSet(phiBB->dataFlowInfo->liveInV, dalvikReg)) continue;
+ MIR *phi = (MIR *) oatNew(cUnit, sizeof(MIR), true, kAllocDFInfo);
+ phi->dalvikInsn.opcode = (Instruction::Code)kMirOpPhi;
+ phi->dalvikInsn.vA = dalvikReg;
+ phi->offset = phiBB->startOffset;
+ phi->meta.phiNext = cUnit->phiList;
+ cUnit->phiList = phi;
+ oatPrependMIR(phiBB, phi);
}
+ }
}
/*
@@ -674,139 +670,136 @@
*/
bool insertPhiNodeOperands(CompilationUnit* cUnit, BasicBlock* bb)
{
- ArenaBitVector* ssaRegV = cUnit->tempSSARegisterV;
- GrowableListIterator iter;
- MIR *mir;
+ ArenaBitVector* ssaRegV = cUnit->tempSSARegisterV;
+ GrowableListIterator iter;
+ MIR *mir;
- /* Phi nodes are at the beginning of each block */
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
- if (mir->dalvikInsn.opcode != (Instruction::Code)kMirOpPhi)
- return true;
- int ssaReg = mir->ssaRep->defs[0];
- DCHECK_GE(ssaReg, 0); // Shouldn't see compiler temps here
- int vReg = SRegToVReg(cUnit, ssaReg);
+ /* Phi nodes are at the beginning of each block */
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+ if (mir->dalvikInsn.opcode != (Instruction::Code)kMirOpPhi)
+ return true;
+ int ssaReg = mir->ssaRep->defs[0];
+ DCHECK_GE(ssaReg, 0); // Shouldn't see compiler temps here
+ int vReg = SRegToVReg(cUnit, ssaReg);
- oatClearAllBits(ssaRegV);
+ oatClearAllBits(ssaRegV);
- /* Iterate through the predecessors */
- oatGrowableListIteratorInit(bb->predecessors, &iter);
- while (true) {
- BasicBlock* predBB =
- (BasicBlock*)oatGrowableListIteratorNext(&iter);
- if (!predBB) break;
- int ssaReg =
- predBB->dataFlowInfo->vRegToSSAMap[vReg];
- oatSetBit(cUnit, ssaRegV, ssaReg);
- }
-
- /* Count the number of SSA registers for a Dalvik register */
- int numUses = oatCountSetBits(ssaRegV);
- mir->ssaRep->numUses = numUses;
- mir->ssaRep->uses =
- (int *) oatNew(cUnit, sizeof(int) * numUses, false, kAllocDFInfo);
- mir->ssaRep->fpUse =
- (bool *) oatNew(cUnit, sizeof(bool) * numUses, true, kAllocDFInfo);
-
- ArenaBitVectorIterator phiIterator;
-
- oatBitVectorIteratorInit(ssaRegV, &phiIterator);
- int *usePtr = mir->ssaRep->uses;
-
- /* Set the uses array for the phi node */
- while (true) {
- int ssaRegIdx = oatBitVectorIteratorNext(&phiIterator);
- if (ssaRegIdx == -1) break;
- *usePtr++ = ssaRegIdx;
- }
+ /* Iterate through the predecessors */
+ oatGrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock* predBB =
+ (BasicBlock*)oatGrowableListIteratorNext(&iter);
+ if (!predBB) break;
+ int ssaReg = predBB->dataFlowInfo->vRegToSSAMap[vReg];
+ oatSetBit(cUnit, ssaRegV, ssaReg);
}
- return true;
+ /* Count the number of SSA registers for a Dalvik register */
+ int numUses = oatCountSetBits(ssaRegV);
+ mir->ssaRep->numUses = numUses;
+ mir->ssaRep->uses =
+ (int *) oatNew(cUnit, sizeof(int) * numUses, false, kAllocDFInfo);
+ mir->ssaRep->fpUse =
+ (bool *) oatNew(cUnit, sizeof(bool) * numUses, true, kAllocDFInfo);
+
+ ArenaBitVectorIterator phiIterator;
+
+ oatBitVectorIteratorInit(ssaRegV, &phiIterator);
+ int *usePtr = mir->ssaRep->uses;
+
+ /* Set the uses array for the phi node */
+ while (true) {
+ int ssaRegIdx = oatBitVectorIteratorNext(&phiIterator);
+ if (ssaRegIdx == -1) break;
+ *usePtr++ = ssaRegIdx;
+ }
+ }
+
+ return true;
}
void doDFSPreOrderSSARename(CompilationUnit* cUnit, BasicBlock* block)
{
- if (block->visited || block->hidden) return;
- block->visited = true;
+ if (block->visited || block->hidden) return;
+ block->visited = true;
- /* Process this block */
- oatDoSSAConversion(cUnit, block);
- int mapSize = sizeof(int) * cUnit->numDalvikRegisters;
+ /* Process this block */
+ oatDoSSAConversion(cUnit, block);
+ int mapSize = sizeof(int) * cUnit->numDalvikRegisters;
- /* Save SSA map snapshot */
- int* savedSSAMap = (int*)oatNew(cUnit, mapSize, false,
- kAllocDalvikToSSAMap);
- memcpy(savedSSAMap, cUnit->vRegToSSAMap, mapSize);
+ /* Save SSA map snapshot */
+ int* savedSSAMap = (int*)oatNew(cUnit, mapSize, false,
+ kAllocDalvikToSSAMap);
+ memcpy(savedSSAMap, cUnit->vRegToSSAMap, mapSize);
- if (block->fallThrough) {
- doDFSPreOrderSSARename(cUnit, block->fallThrough);
- /* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ if (block->fallThrough) {
+ doDFSPreOrderSSARename(cUnit, block->fallThrough);
+ /* Restore SSA map snapshot */
+ memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ }
+ if (block->taken) {
+ doDFSPreOrderSSARename(cUnit, block->taken);
+ /* Restore SSA map snapshot */
+ memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
+ }
+ if (block->successorBlockList.blockListType != kNotUsed) {
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&block->successorBlockList.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successorBlockInfo =
+ (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
+ if (successorBlockInfo == NULL) break;
+ BasicBlock* succBB = successorBlockInfo->block;
+ doDFSPreOrderSSARename(cUnit, succBB);
+ /* Restore SSA map snapshot */
+ memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
}
- if (block->taken) {
- doDFSPreOrderSSARename(cUnit, block->taken);
- /* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
- }
- if (block->successorBlockList.blockListType != kNotUsed) {
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&block->successorBlockList.blocks,
- &iterator);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iterator);
- if (successorBlockInfo == NULL) break;
- BasicBlock* succBB = successorBlockInfo->block;
- doDFSPreOrderSSARename(cUnit, succBB);
- /* Restore SSA map snapshot */
- memcpy(cUnit->vRegToSSAMap, savedSSAMap, mapSize);
- }
- }
- cUnit->vRegToSSAMap = savedSSAMap;
- return;
+ }
+ cUnit->vRegToSSAMap = savedSSAMap;
+ return;
}
/* Perform SSA transformation for the whole method */
void oatMethodSSATransformation(CompilationUnit* cUnit)
{
- /* Compute the DFS order */
- computeDFSOrders(cUnit);
+ /* Compute the DFS order */
+ computeDFSOrders(cUnit);
- if (!cUnit->disableDataflow) {
- /* Compute the dominator info */
- computeDominators(cUnit);
- }
+ if (!cUnit->disableDataflow) {
+ /* Compute the dominator info */
+ computeDominators(cUnit);
+ }
- /* Allocate data structures in preparation for SSA conversion */
- oatInitializeSSAConversion(cUnit);
+ /* Allocate data structures in preparation for SSA conversion */
+ oatInitializeSSAConversion(cUnit);
- if (!cUnit->disableDataflow) {
- /* Find out the "Dalvik reg def x block" relation */
- computeDefBlockMatrix(cUnit);
+ if (!cUnit->disableDataflow) {
+ /* Find out the "Dalvik reg def x block" relation */
+ computeDefBlockMatrix(cUnit);
- /* Insert phi nodes to dominance frontiers for all variables */
- insertPhiNodes(cUnit);
- }
+ /* Insert phi nodes to dominance frontiers for all variables */
+ insertPhiNodes(cUnit);
+ }
- /* Rename register names by local defs and phi nodes */
- oatDataFlowAnalysisDispatcher(cUnit, oatClearVisitedFlag,
- kAllNodes,
- false /* isIterative */);
- doDFSPreOrderSSARename(cUnit, cUnit->entryBlock);
+ /* Rename register names by local defs and phi nodes */
+ oatDataFlowAnalysisDispatcher(cUnit, oatClearVisitedFlag,
+ kAllNodes, false /* isIterative */);
+ doDFSPreOrderSSARename(cUnit, cUnit->entryBlock);
- if (!cUnit->disableDataflow) {
- /*
- * Shared temp bit vector used by each block to count the number of defs
- * from all the predecessor blocks.
- */
- cUnit->tempSSARegisterV = oatAllocBitVector(cUnit, cUnit->numSSARegs,
- false, kBitMapTempSSARegisterV);
+ if (!cUnit->disableDataflow) {
+ /*
+ * Shared temp bit vector used by each block to count the number of defs
+ * from all the predecessor blocks.
+ */
+ cUnit->tempSSARegisterV = oatAllocBitVector(cUnit, cUnit->numSSARegs,
+ false, kBitMapTempSSARegisterV);
- /* Insert phi-operands with latest SSA names from predecessor blocks */
- oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
- kReachableNodes,
- false /* isIterative */);
- }
+ /* Insert phi-operands with latest SSA names from predecessor blocks */
+ oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
+ kReachableNodes, false /* isIterative */);
+ }
}
} // namespace art
diff --git a/src/compiler/Utility.cc b/src/compiler/Utility.cc
index f5b478c..571208f 100644
--- a/src/compiler/Utility.cc
+++ b/src/compiler/Utility.cc
@@ -21,64 +21,64 @@
#ifdef WITH_MEMSTATS
struct Memstats {
- u4 allocStats[kNumAllocKinds];
- int listSizes[kNumListKinds];
- int listWasted[kNumListKinds];
- int listGrows[kNumListKinds];
- int listMaxElems[kNumListKinds];
- int bitMapSizes[kNumBitMapKinds];
- int bitMapWasted[kNumBitMapKinds];
- int bitMapGrows[kNumBitMapKinds];
+ u4 allocStats[kNumAllocKinds];
+ int listSizes[kNumListKinds];
+ int listWasted[kNumListKinds];
+ int listGrows[kNumListKinds];
+ int listMaxElems[kNumListKinds];
+ int bitMapSizes[kNumBitMapKinds];
+ int bitMapWasted[kNumBitMapKinds];
+ int bitMapGrows[kNumBitMapKinds];
};
const char* allocNames[kNumAllocKinds] = {
- "Misc ",
- "BasicBlock ",
- "LIR ",
- "MIR ",
- "DataFlow ",
- "GrowList ",
- "GrowBitMap ",
- "Dalvik2SSA ",
- "DebugInfo ",
- "Successor ",
- "RegAlloc ",
- "Data ",
- "Preds ",
+ "Misc ",
+ "BasicBlock ",
+ "LIR ",
+ "MIR ",
+ "DataFlow ",
+ "GrowList ",
+ "GrowBitMap ",
+ "Dalvik2SSA ",
+ "DebugInfo ",
+ "Successor ",
+ "RegAlloc ",
+ "Data ",
+ "Preds ",
};
const char* listNames[kNumListKinds] = {
- "Misc ",
- "blockList ",
- "SSAtoDalvik ",
- "dfsOrder ",
- "dfsPostOrder ",
- "domPostOrderTraversal ",
- "throwLaunchPads ",
- "suspendLaunchPads ",
- "switchTables ",
- "fillArrayData ",
- "SuccessorBlocks ",
- "Predecessors ",
+ "Misc ",
+ "blockList ",
+ "SSAtoDalvik ",
+ "dfsOrder ",
+ "dfsPostOrder ",
+ "domPostOrderTraversal ",
+ "throwLaunchPads ",
+ "suspendLaunchPads ",
+ "switchTables ",
+ "fillArrayData ",
+ "SuccessorBlocks ",
+ "Predecessors ",
};
const char* bitMapNames[kNumBitMapKinds] = {
- "Misc ",
- "Use ",
- "Def ",
- "LiveIn ",
- "BlockMatrix ",
- "Dominators ",
- "IDominated ",
- "DomFrontier ",
- "Phi ",
- "TmpBlocks ",
- "InputBlocks ",
- "RegisterV ",
- "TempSSARegisterV ",
- "Null Check ",
- "TmpBlockV ",
- "Predecessors ",
+ "Misc ",
+ "Use ",
+ "Def ",
+ "LiveIn ",
+ "BlockMatrix ",
+ "Dominators ",
+ "IDominated ",
+ "DomFrontier ",
+ "Phi ",
+ "TmpBlocks ",
+ "InputBlocks ",
+ "RegisterV ",
+ "TempSSARegisterV ",
+ "Null Check ",
+ "TmpBlockV ",
+ "Predecessors ",
};
#endif
@@ -87,266 +87,265 @@
/* Allocate the initial memory block for arena-based allocation */
bool oatHeapInit(CompilationUnit* cUnit)
{
- DCHECK(cUnit->arenaHead == NULL);
- cUnit->arenaHead =
- (ArenaMemBlock *) malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE);
- if (cUnit->arenaHead == NULL) {
- LOG(FATAL) << "No memory left to create compiler heap memory";
- }
- cUnit->arenaHead->blockSize = ARENA_DEFAULT_SIZE;
- cUnit->currentArena = cUnit->arenaHead;
- cUnit->currentArena->bytesAllocated = 0;
- cUnit->currentArena->next = NULL;
- cUnit->numArenaBlocks = 1;
+ DCHECK(cUnit->arenaHead == NULL);
+ cUnit->arenaHead =
+ (ArenaMemBlock *) malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE);
+ if (cUnit->arenaHead == NULL) {
+ LOG(FATAL) << "No memory left to create compiler heap memory";
+ }
+ cUnit->arenaHead->blockSize = ARENA_DEFAULT_SIZE;
+ cUnit->currentArena = cUnit->arenaHead;
+ cUnit->currentArena->bytesAllocated = 0;
+ cUnit->currentArena->next = NULL;
+ cUnit->numArenaBlocks = 1;
#ifdef WITH_MEMSTATS
- cUnit->mstats = (Memstats*) oatNew(cUnit, sizeof(Memstats), true,
- kAllocDebugInfo);
+ cUnit->mstats = (Memstats*) oatNew(cUnit, sizeof(Memstats), true,
+ kAllocDebugInfo);
#endif
- return true;
+ return true;
}
/* Arena-based malloc for compilation tasks */
void* oatNew(CompilationUnit* cUnit, size_t size, bool zero, oatAllocKind kind)
{
- size = (size + 3) & ~3;
+ size = (size + 3) & ~3;
#ifdef WITH_MEMSTATS
- if (cUnit->mstats != NULL) {
- cUnit->mstats->allocStats[kind] += size;
- }
+ if (cUnit->mstats != NULL) {
+ cUnit->mstats->allocStats[kind] += size;
+ }
#endif
retry:
- /* Normal case - space is available in the current page */
- if (size + cUnit->currentArena->bytesAllocated <=
- cUnit->currentArena->blockSize) {
- void *ptr;
- ptr = &cUnit->currentArena->ptr[cUnit->currentArena->bytesAllocated];
- cUnit->currentArena->bytesAllocated += size;
- if (zero) {
- memset(ptr, 0, size);
- }
- return ptr;
- } else {
- /*
- * See if there are previously allocated arena blocks before the last
- * reset
- */
- if (cUnit->currentArena->next) {
- cUnit->currentArena = cUnit->currentArena->next;
- cUnit->currentArena->bytesAllocated = 0;
- goto retry;
- }
-
- size_t blockSize = (size < ARENA_DEFAULT_SIZE) ?
- ARENA_DEFAULT_SIZE : size;
- /* Time to allocate a new arena */
- ArenaMemBlock *newArena = (ArenaMemBlock *)
- malloc(sizeof(ArenaMemBlock) + blockSize);
- if (newArena == NULL) {
- LOG(FATAL) << "Arena allocation failure";
- }
- newArena->blockSize = blockSize;
- newArena->bytesAllocated = 0;
- newArena->next = NULL;
- cUnit->currentArena->next = newArena;
- cUnit->currentArena = newArena;
- cUnit->numArenaBlocks++;
- if (cUnit->numArenaBlocks > 20000) {
- LOG(INFO) << "Total arena pages: " << cUnit->numArenaBlocks;
- }
+ /* Normal case - space is available in the current page */
+ if (size + cUnit->currentArena->bytesAllocated <=
+ cUnit->currentArena->blockSize) {
+ void *ptr;
+ ptr = &cUnit->currentArena->ptr[cUnit->currentArena->bytesAllocated];
+ cUnit->currentArena->bytesAllocated += size;
+ if (zero) {
+ memset(ptr, 0, size);
+ }
+ return ptr;
+ } else {
+ /*
+ * See if there are previously allocated arena blocks before the last
+ * reset
+ */
+ if (cUnit->currentArena->next) {
+ cUnit->currentArena = cUnit->currentArena->next;
+ cUnit->currentArena->bytesAllocated = 0;
goto retry;
}
+
+ size_t blockSize = (size < ARENA_DEFAULT_SIZE) ? ARENA_DEFAULT_SIZE : size;
+ /* Time to allocate a new arena */
+ ArenaMemBlock *newArena = (ArenaMemBlock *)
+ malloc(sizeof(ArenaMemBlock) + blockSize);
+ if (newArena == NULL) {
+ LOG(FATAL) << "Arena allocation failure";
+ }
+ newArena->blockSize = blockSize;
+ newArena->bytesAllocated = 0;
+ newArena->next = NULL;
+ cUnit->currentArena->next = newArena;
+ cUnit->currentArena = newArena;
+ cUnit->numArenaBlocks++;
+ if (cUnit->numArenaBlocks > 20000) {
+ LOG(INFO) << "Total arena pages: " << cUnit->numArenaBlocks;
+ }
+ goto retry;
+ }
}
/* Reclaim all the arena blocks allocated so far */
void oatArenaReset(CompilationUnit* cUnit)
{
- ArenaMemBlock* head = cUnit->arenaHead;
- while (head != NULL) {
- ArenaMemBlock* p = head;
- head = head->next;
- free(p);
- }
- cUnit->arenaHead = NULL;
- cUnit->currentArena = NULL;
+ ArenaMemBlock* head = cUnit->arenaHead;
+ while (head != NULL) {
+ ArenaMemBlock* p = head;
+ head = head->next;
+ free(p);
+ }
+ cUnit->arenaHead = NULL;
+ cUnit->currentArena = NULL;
}
/* Growable List initialization */
void oatInitGrowableList(CompilationUnit* cUnit, GrowableList* gList,
- size_t initLength, oatListKind kind)
+ size_t initLength, oatListKind kind)
{
- gList->numAllocated = initLength;
- gList->numUsed = 0;
- gList->elemList = (intptr_t *) oatNew(cUnit, sizeof(intptr_t) * initLength,
- true, kAllocGrowableList);
+ gList->numAllocated = initLength;
+ gList->numUsed = 0;
+ gList->elemList = (intptr_t *) oatNew(cUnit, sizeof(intptr_t) * initLength,
+ true, kAllocGrowableList);
#ifdef WITH_MEMSTATS
- cUnit->mstats->listSizes[kind] += sizeof(intptr_t) * initLength;
- gList->kind = kind;
- if ((int)initLength > cUnit->mstats->listMaxElems[kind]) {
- cUnit->mstats->listMaxElems[kind] = initLength;
- }
+ cUnit->mstats->listSizes[kind] += sizeof(intptr_t) * initLength;
+ gList->kind = kind;
+ if ((int)initLength > cUnit->mstats->listMaxElems[kind]) {
+ cUnit->mstats->listMaxElems[kind] = initLength;
+ }
#endif
}
/* Expand the capacity of a growable list */
void expandGrowableList(CompilationUnit* cUnit, GrowableList* gList)
{
- int newLength = gList->numAllocated;
- if (newLength < 128) {
- newLength <<= 1;
- } else {
- newLength += 128;
- }
- intptr_t *newArray =
- (intptr_t *) oatNew(cUnit, sizeof(intptr_t) * newLength, true,
- kAllocGrowableList);
- memcpy(newArray, gList->elemList, sizeof(intptr_t) * gList->numAllocated);
+ int newLength = gList->numAllocated;
+ if (newLength < 128) {
+ newLength <<= 1;
+ } else {
+ newLength += 128;
+ }
+ intptr_t *newArray =
+ (intptr_t *) oatNew(cUnit, sizeof(intptr_t) * newLength, true,
+ kAllocGrowableList);
+ memcpy(newArray, gList->elemList, sizeof(intptr_t) * gList->numAllocated);
#ifdef WITH_MEMSTATS
- cUnit->mstats->listSizes[gList->kind] += sizeof(intptr_t) * newLength;
- cUnit->mstats->listWasted[gList->kind] +=
- sizeof(intptr_t) * gList->numAllocated;
- cUnit->mstats->listGrows[gList->kind]++;
- if (newLength > cUnit->mstats->listMaxElems[gList->kind]) {
- cUnit->mstats->listMaxElems[gList->kind] = newLength;
- }
+ cUnit->mstats->listSizes[gList->kind] += sizeof(intptr_t) * newLength;
+ cUnit->mstats->listWasted[gList->kind] +=
+ sizeof(intptr_t) * gList->numAllocated;
+ cUnit->mstats->listGrows[gList->kind]++;
+ if (newLength > cUnit->mstats->listMaxElems[gList->kind]) {
+ cUnit->mstats->listMaxElems[gList->kind] = newLength;
+ }
#endif
- gList->numAllocated = newLength;
- gList->elemList = newArray;
+ gList->numAllocated = newLength;
+ gList->elemList = newArray;
}
/* Insert a new element into the growable list */
void oatInsertGrowableList(CompilationUnit* cUnit, GrowableList* gList,
- intptr_t elem)
+ intptr_t elem)
{
- DCHECK_NE(gList->numAllocated, 0U);
- if (gList->numUsed == gList->numAllocated) {
- expandGrowableList(cUnit, gList);
- }
- gList->elemList[gList->numUsed++] = elem;
+ DCHECK_NE(gList->numAllocated, 0U);
+ if (gList->numUsed == gList->numAllocated) {
+ expandGrowableList(cUnit, gList);
+ }
+ gList->elemList[gList->numUsed++] = elem;
}
/* Delete an element from a growable list. Element must be present */
void oatDeleteGrowableList(GrowableList* gList, intptr_t elem)
{
- bool found = false;
- for (unsigned int i = 0; i < gList->numUsed; i++) {
- if (!found && gList->elemList[i] == elem) {
- found = true;
- }
- if (found) {
- gList->elemList[i] = gList->elemList[i+1];
- }
+ bool found = false;
+ for (unsigned int i = 0; i < gList->numUsed; i++) {
+ if (!found && gList->elemList[i] == elem) {
+ found = true;
}
- DCHECK_EQ(found, true);
- gList->numUsed--;
+ if (found) {
+ gList->elemList[i] = gList->elemList[i+1];
+ }
+ }
+ DCHECK_EQ(found, true);
+ gList->numUsed--;
}
void oatGrowableListIteratorInit(GrowableList* gList,
- GrowableListIterator* iterator)
+ GrowableListIterator* iterator)
{
- iterator->list = gList;
- iterator->idx = 0;
- iterator->size = gList->numUsed;
+ iterator->list = gList;
+ iterator->idx = 0;
+ iterator->size = gList->numUsed;
}
intptr_t oatGrowableListIteratorNext(GrowableListIterator* iterator)
{
- DCHECK_EQ(iterator->size, iterator->list->numUsed);
- if (iterator->idx == iterator->size) return 0;
- return iterator->list->elemList[iterator->idx++];
+ DCHECK_EQ(iterator->size, iterator->list->numUsed);
+ if (iterator->idx == iterator->size) return 0;
+ return iterator->list->elemList[iterator->idx++];
}
intptr_t oatGrowableListGetElement(const GrowableList* gList, size_t idx)
{
- DCHECK_LT(idx, gList->numUsed);
- return gList->elemList[idx];
+ DCHECK_LT(idx, gList->numUsed);
+ return gList->elemList[idx];
}
#ifdef WITH_MEMSTATS
/* Dump memory usage stats */
void oatDumpMemStats(CompilationUnit* cUnit)
{
- u4 total = 0;
- for (int i = 0; i < kNumAllocKinds; i++) {
- total += cUnit->mstats->allocStats[i];
+ u4 total = 0;
+ for (int i = 0; i < kNumAllocKinds; i++) {
+ total += cUnit->mstats->allocStats[i];
+ }
+ if (total > (10 * 1024 * 1024)) {
+ LOG(INFO) << "MEMUSAGE: " << total << " : "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ LOG(INFO) << "insnsSize: " << cUnit->insnsSize;
+ if (cUnit->disableDataflow) {
+ LOG(INFO) << " ** Dataflow disabled ** ";
}
- if (total > (10 * 1024 * 1024)) {
- LOG(INFO) << "MEMUSAGE: " << total << " : "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LOG(INFO) << "insnsSize: " << cUnit->insnsSize;
- if (cUnit->disableDataflow) {
- LOG(INFO) << " ** Dataflow disabled ** ";
- }
- LOG(INFO) << "===== Overall allocations";
- for (int i = 0; i < kNumAllocKinds; i++) {
- LOG(INFO) << allocNames[i] << std::setw(10) <<
- cUnit->mstats->allocStats[i];
- }
- LOG(INFO) << "===== GrowableList allocations";
- for (int i = 0; i < kNumListKinds; i++) {
- LOG(INFO) << listNames[i]
+ LOG(INFO) << "===== Overall allocations";
+ for (int i = 0; i < kNumAllocKinds; i++) {
+ LOG(INFO) << allocNames[i] << std::setw(10) <<
+ cUnit->mstats->allocStats[i];
+ }
+ LOG(INFO) << "===== GrowableList allocations";
+ for (int i = 0; i < kNumListKinds; i++) {
+ LOG(INFO) << listNames[i]
<< " S:" << cUnit->mstats->listSizes[i]
<< ", W:" << cUnit->mstats->listWasted[i]
<< ", G:" << cUnit->mstats->listGrows[i]
<< ", E:" << cUnit->mstats->listMaxElems[i];
- }
- LOG(INFO) << "===== GrowableBitMap allocations";
- for (int i = 0; i < kNumBitMapKinds; i++) {
- LOG(INFO) << bitMapNames[i]
+ }
+ LOG(INFO) << "===== GrowableBitMap allocations";
+ for (int i = 0; i < kNumBitMapKinds; i++) {
+ LOG(INFO) << bitMapNames[i]
<< " S:" << cUnit->mstats->bitMapSizes[i]
<< ", W:" << cUnit->mstats->bitMapWasted[i]
<< ", G:" << cUnit->mstats->bitMapGrows[i];
- }
}
+ }
}
#endif
/* Debug Utility - dump a compilation unit */
void oatDumpCompilationUnit(CompilationUnit* cUnit)
{
- BasicBlock* bb;
- const char* blockTypeNames[] = {
- "Entry Block",
- "Code Block",
- "Exit Block",
- "Exception Handling",
- "Catch Block"
- };
+ BasicBlock* bb;
+ const char* blockTypeNames[] = {
+ "Entry Block",
+ "Code Block",
+ "Exit Block",
+ "Exception Handling",
+ "Catch Block"
+ };
- LOG(INFO) << "Compiling " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LOG(INFO) << cUnit->insns << " insns";
- LOG(INFO) << cUnit->numBlocks << " blocks in total";
- GrowableListIterator iterator;
+ LOG(INFO) << "Compiling " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ LOG(INFO) << cUnit->insns << " insns";
+ LOG(INFO) << cUnit->numBlocks << " blocks in total";
+ GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
+ oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
- while (true) {
- bb = (BasicBlock *) oatGrowableListIteratorNext(&iterator);
- if (bb == NULL) break;
- LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
- bb->id,
- blockTypeNames[bb->blockType],
- bb->startOffset,
- bb->lastMIRInsn ? bb->lastMIRInsn->offset : bb->startOffset,
- bb->lastMIRInsn ? "" : " empty");
- if (bb->taken) {
- LOG(INFO) << " Taken branch: block " << bb->taken->id <<
- "(0x" << std::hex << bb->taken->startOffset << ")";
- }
- if (bb->fallThrough) {
- LOG(INFO) << " Fallthrough : block " << bb->fallThrough->id <<
- " (0x" << std::hex << bb->fallThrough->startOffset << ")";
- }
+ while (true) {
+ bb = (BasicBlock *) oatGrowableListIteratorNext(&iterator);
+ if (bb == NULL) break;
+ LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
+ bb->id,
+ blockTypeNames[bb->blockType],
+ bb->startOffset,
+ bb->lastMIRInsn ? bb->lastMIRInsn->offset : bb->startOffset,
+ bb->lastMIRInsn ? "" : " empty");
+ if (bb->taken) {
+ LOG(INFO) << " Taken branch: block " << bb->taken->id
+ << "(0x" << std::hex << bb->taken->startOffset << ")";
}
+ if (bb->fallThrough) {
+ LOG(INFO) << " Fallthrough : block " << bb->fallThrough->id
+ << " (0x" << std::hex << bb->fallThrough->startOffset << ")";
+ }
+ }
}
static uint32_t checkMasks[32] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
- 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
- 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
- 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
- 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
- 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
- 0x40000000, 0x80000000 };
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+ 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+ 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+ 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+ 0x40000000, 0x80000000 };
/*
* Allocate a bit vector with enough space to hold at least the specified
@@ -355,28 +354,28 @@
* NOTE: memory is allocated from the compiler arena.
*/
ArenaBitVector* oatAllocBitVector(CompilationUnit* cUnit,
- unsigned int startBits, bool expandable,
- oatBitMapKind kind)
+ unsigned int startBits, bool expandable,
+ oatBitMapKind kind)
{
- ArenaBitVector* bv;
- unsigned int count;
+ ArenaBitVector* bv;
+ unsigned int count;
- DCHECK_EQ(sizeof(bv->storage[0]), 4U); /* assuming 32-bit units */
+ DCHECK_EQ(sizeof(bv->storage[0]), 4U); /* assuming 32-bit units */
- bv = (ArenaBitVector*) oatNew(cUnit, sizeof(ArenaBitVector), false,
- kAllocGrowableBitMap);
+ bv = (ArenaBitVector*) oatNew(cUnit, sizeof(ArenaBitVector), false,
+ kAllocGrowableBitMap);
- count = (startBits + 31) >> 5;
+ count = (startBits + 31) >> 5;
- bv->storageSize = count;
- bv->expandable = expandable;
- bv->storage = (u4*) oatNew(cUnit, count * sizeof(u4), true,
- kAllocGrowableBitMap);
+ bv->storageSize = count;
+ bv->expandable = expandable;
+ bv->storage = (u4*) oatNew(cUnit, count * sizeof(u4), true,
+ kAllocGrowableBitMap);
#ifdef WITH_MEMSTATS
- bv->kind = kind;
- cUnit->mstats->bitMapSizes[kind] += count * sizeof(u4);
+ bv->kind = kind;
+ cUnit->mstats->bitMapSizes[kind] += count * sizeof(u4);
#endif
- return bv;
+ return bv;
}
/*
@@ -384,10 +383,10 @@
*/
bool oatIsBitSet(const ArenaBitVector* pBits, unsigned int num)
{
- DCHECK_LT(num, pBits->storageSize * sizeof(u4) * 8);
+ DCHECK_LT(num, pBits->storageSize * sizeof(u4) * 8);
- unsigned int val = pBits->storage[num >> 5] & checkMasks[num & 0x1f];
- return (val != 0);
+ unsigned int val = pBits->storage[num >> 5] & checkMasks[num & 0x1f];
+ return (val != 0);
}
/*
@@ -395,8 +394,8 @@
*/
void oatClearAllBits(ArenaBitVector* pBits)
{
- unsigned int count = pBits->storageSize;
- memset(pBits->storage, 0, count * sizeof(u4));
+ unsigned int count = pBits->storageSize;
+ memset(pBits->storage, 0, count * sizeof(u4));
}
/*
@@ -409,31 +408,31 @@
*/
bool oatSetBit(CompilationUnit* cUnit, ArenaBitVector* pBits, unsigned int num)
{
- if (num >= pBits->storageSize * sizeof(u4) * 8) {
- if (!pBits->expandable) {
- LOG(FATAL) << "Can't expand";
- }
-
- /* Round up to word boundaries for "num+1" bits */
- unsigned int newSize = (num + 1 + 31) >> 5;
- DCHECK_GT(newSize, pBits->storageSize);
- u4 *newStorage = (u4*)oatNew(cUnit, newSize * sizeof(u4), false,
- kAllocGrowableBitMap);
- memcpy(newStorage, pBits->storage, pBits->storageSize * sizeof(u4));
- memset(&newStorage[pBits->storageSize], 0,
- (newSize - pBits->storageSize) * sizeof(u4));
-#ifdef WITH_MEMSTATS
- cUnit->mstats->bitMapWasted[pBits->kind] +=
- pBits->storageSize * sizeof(u4);
- cUnit->mstats->bitMapSizes[pBits->kind] += newSize * sizeof(u4);
- cUnit->mstats->bitMapGrows[pBits->kind]++;
-#endif
- pBits->storage = newStorage;
- pBits->storageSize = newSize;
+ if (num >= pBits->storageSize * sizeof(u4) * 8) {
+ if (!pBits->expandable) {
+ LOG(FATAL) << "Can't expand";
}
- pBits->storage[num >> 5] |= checkMasks[num & 0x1f];
- return true;
+ /* Round up to word boundaries for "num+1" bits */
+ unsigned int newSize = (num + 1 + 31) >> 5;
+ DCHECK_GT(newSize, pBits->storageSize);
+ u4 *newStorage = (u4*)oatNew(cUnit, newSize * sizeof(u4), false,
+ kAllocGrowableBitMap);
+ memcpy(newStorage, pBits->storage, pBits->storageSize * sizeof(u4));
+ memset(&newStorage[pBits->storageSize], 0,
+ (newSize - pBits->storageSize) * sizeof(u4));
+#ifdef WITH_MEMSTATS
+ cUnit->mstats->bitMapWasted[pBits->kind] +=
+ pBits->storageSize * sizeof(u4);
+ cUnit->mstats->bitMapSizes[pBits->kind] += newSize * sizeof(u4);
+ cUnit->mstats->bitMapGrows[pBits->kind]++;
+#endif
+ pBits->storage = newStorage;
+ pBits->storageSize = newSize;
+ }
+
+ pBits->storage[num >> 5] |= checkMasks[num & 0x1f];
+ return true;
}
/*
@@ -446,12 +445,12 @@
*/
bool oatClearBit(ArenaBitVector* pBits, unsigned int num)
{
- if (num >= pBits->storageSize * sizeof(u4) * 8) {
- LOG(FATAL) << "Attempt to clear a bit not set in the vector yet";;
- }
+ if (num >= pBits->storageSize * sizeof(u4) * 8) {
+ LOG(FATAL) << "Attempt to clear a bit not set in the vector yet";
+ }
- pBits->storage[num >> 5] &= ~checkMasks[num & 0x1f];
- return true;
+ pBits->storage[num >> 5] &= ~checkMasks[num & 0x1f];
+ return true;
}
/*
@@ -459,50 +458,49 @@
*/
void oatMarkAllBits(ArenaBitVector* pBits, bool set)
{
- int value = set ? -1 : 0;
- memset(pBits->storage, value, pBits->storageSize * (int)sizeof(u4));
+ int value = set ? -1 : 0;
+ memset(pBits->storage, value, pBits->storageSize * (int)sizeof(u4));
}
void oatDebugBitVector(char* msg, const ArenaBitVector* bv, int length)
{
- int i;
+ int i;
- LOG(INFO) << msg;
- for (i = 0; i < length; i++) {
- if (oatIsBitSet(bv, i)) {
- LOG(INFO) << " Bit " << i << " is set";
- }
+ LOG(INFO) << msg;
+ for (i = 0; i < length; i++) {
+ if (oatIsBitSet(bv, i)) {
+ LOG(INFO) << " Bit " << i << " is set";
}
+ }
}
void oatAbort(CompilationUnit* cUnit)
{
- LOG(FATAL) << "Compiler aborting";
+ LOG(FATAL) << "Compiler aborting";
}
void oatDumpBlockBitVector(const GrowableList* blocks, char* msg,
- const ArenaBitVector* bv, int length)
+ const ArenaBitVector* bv, int length)
{
- int i;
+ int i;
- LOG(INFO) << msg;
- for (i = 0; i < length; i++) {
- if (oatIsBitSet(bv, i)) {
- BasicBlock *bb =
- (BasicBlock *) oatGrowableListGetElement(blocks, i);
- char blockName[BLOCK_NAME_LEN];
- oatGetBlockName(bb, blockName);
- LOG(INFO) << "Bit " << i << " / " << blockName << " is set";
- }
+ LOG(INFO) << msg;
+ for (i = 0; i < length; i++) {
+ if (oatIsBitSet(bv, i)) {
+ BasicBlock *bb = (BasicBlock *) oatGrowableListGetElement(blocks, i);
+ char blockName[BLOCK_NAME_LEN];
+ oatGetBlockName(bb, blockName);
+ LOG(INFO) << "Bit " << i << " / " << blockName << " is set";
}
+ }
}
/* Initialize the iterator structure */
void oatBitVectorIteratorInit(ArenaBitVector* pBits,
- ArenaBitVectorIterator* iterator)
+ ArenaBitVectorIterator* iterator)
{
- iterator->pBits = pBits;
- iterator->bitSize = pBits->storageSize * sizeof(u4) * 8;
- iterator->idx = 0;
+ iterator->pBits = pBits;
+ iterator->bitSize = pBits->storageSize * sizeof(u4) * 8;
+ iterator->idx = 0;
}
/*
@@ -510,10 +508,10 @@
*/
void checkSizes(const ArenaBitVector* bv1, const ArenaBitVector* bv2)
{
- if (bv1->storageSize != bv2->storageSize) {
- LOG(FATAL) << "Mismatched vector sizes (" << bv1->storageSize <<
- ", " << bv2->storageSize << ")";
- }
+ if (bv1->storageSize != bv2->storageSize) {
+ LOG(FATAL) << "Mismatched vector sizes (" << bv1->storageSize
+ << ", " << bv2->storageSize << ")";
+ }
}
/*
@@ -522,10 +520,10 @@
*/
void oatCopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src)
{
- /* if dest is expandable and < src, we could expand dest to match */
- checkSizes(dest, src);
+ /* if dest is expandable and < src, we could expand dest to match */
+ checkSizes(dest, src);
- memcpy(dest->storage, src->storage, sizeof(u4) * dest->storageSize);
+ memcpy(dest->storage, src->storage, sizeof(u4) * dest->storageSize);
}
/*
@@ -533,72 +531,72 @@
*/
bool oatIntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
- const ArenaBitVector* src2)
+ const ArenaBitVector* src2)
{
- DCHECK(src1 != NULL);
- DCHECK(src2 != NULL);
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
- dest->expandable != src1->expandable ||
- dest->expandable != src2->expandable)
- return false;
+ DCHECK(src1 != NULL);
+ DCHECK(src2 != NULL);
+ if (dest->storageSize != src1->storageSize ||
+ dest->storageSize != src2->storageSize ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable)
+ return false;
- unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
- dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
- }
- return true;
+ unsigned int idx;
+ for (idx = 0; idx < dest->storageSize; idx++) {
+ dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
+ }
+ return true;
}
/*
* Unify two bit vectors and store the result to the dest vector.
*/
bool oatUnifyBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
- const ArenaBitVector* src2)
+ const ArenaBitVector* src2)
{
- DCHECK(src1 != NULL);
- DCHECK(src2 != NULL);
- if (dest->storageSize != src1->storageSize ||
- dest->storageSize != src2->storageSize ||
- dest->expandable != src1->expandable ||
- dest->expandable != src2->expandable)
- return false;
+ DCHECK(src1 != NULL);
+ DCHECK(src2 != NULL);
+ if (dest->storageSize != src1->storageSize ||
+ dest->storageSize != src2->storageSize ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable)
+ return false;
- unsigned int idx;
- for (idx = 0; idx < dest->storageSize; idx++) {
- dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
- }
- return true;
+ unsigned int idx;
+ for (idx = 0; idx < dest->storageSize; idx++) {
+ dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
+ }
+ return true;
}
/*
* Return true if any bits collide. Vectors must be same size.
*/
bool oatTestBitVectors(const ArenaBitVector* src1,
- const ArenaBitVector* src2)
+ const ArenaBitVector* src2)
{
- DCHECK_EQ(src1->storageSize, src2->storageSize);
- for (uint32_t idx = 0; idx < src1->storageSize; idx++) {
- if (src1->storage[idx] & src2->storage[idx]) return true;
- }
- return false;
+ DCHECK_EQ(src1->storageSize, src2->storageSize);
+ for (uint32_t idx = 0; idx < src1->storageSize; idx++) {
+ if (src1->storage[idx] & src2->storage[idx]) return true;
+ }
+ return false;
}
/*
* Compare two bit vectors and return true if difference is seen.
*/
bool oatCompareBitVectors(const ArenaBitVector* src1,
- const ArenaBitVector* src2)
+ const ArenaBitVector* src2)
{
- if (src1->storageSize != src2->storageSize ||
- src1->expandable != src2->expandable)
- return true;
+ if (src1->storageSize != src2->storageSize ||
+ src1->expandable != src2->expandable)
+ return true;
- unsigned int idx;
- for (idx = 0; idx < src1->storageSize; idx++) {
- if (src1->storage[idx] != src2->storage[idx]) return true;
- }
- return false;
+ unsigned int idx;
+ for (idx = 0; idx < src1->storageSize; idx++) {
+ if (src1->storage[idx] != src2->storage[idx]) return true;
+ }
+ return false;
}
/*
@@ -606,65 +604,65 @@
*/
int oatCountSetBits(const ArenaBitVector* pBits)
{
- unsigned int word;
- unsigned int count = 0;
+ unsigned int word;
+ unsigned int count = 0;
- for (word = 0; word < pBits->storageSize; word++) {
- u4 val = pBits->storage[word];
+ for (word = 0; word < pBits->storageSize; word++) {
+ u4 val = pBits->storage[word];
- if (val != 0) {
- if (val == 0xffffffff) {
- count += 32;
- } else {
- /* count the number of '1' bits */
- while (val != 0) {
- val &= val - 1;
- count++;
- }
- }
+ if (val != 0) {
+ if (val == 0xffffffff) {
+ count += 32;
+ } else {
+ /* count the number of '1' bits */
+ while (val != 0) {
+ val &= val - 1;
+ count++;
}
+ }
}
+ }
- return count;
+ return count;
}
/* Return the next position set to 1. -1 means end-of-element reached */
int oatBitVectorIteratorNext(ArenaBitVectorIterator* iterator)
{
- ArenaBitVector* pBits = iterator->pBits;
- u4 bitIndex = iterator->idx;
- u4 bitSize = iterator->bitSize;
+ ArenaBitVector* pBits = iterator->pBits;
+ u4 bitIndex = iterator->idx;
+ u4 bitSize = iterator->bitSize;
- DCHECK_EQ(bitSize, pBits->storageSize * sizeof(u4) * 8);
+ DCHECK_EQ(bitSize, pBits->storageSize * sizeof(u4) * 8);
- if (bitIndex >= bitSize) return -1;
+ if (bitIndex >= bitSize) return -1;
- u4 wordIndex = bitIndex >> 5;
- u4 endWordIndex = bitSize >> 5;
- u4* storage = pBits->storage;
- u4 word = storage[wordIndex++];
+ u4 wordIndex = bitIndex >> 5;
+ u4 endWordIndex = bitSize >> 5;
+ u4* storage = pBits->storage;
+ u4 word = storage[wordIndex++];
- // Mask out any bits in the first word we've already considered
- word &= ~((1 << (bitIndex & 0x1f))-1);
+ // Mask out any bits in the first word we've already considered
+ word &= ~((1 << (bitIndex & 0x1f))-1);
- for (; wordIndex <= endWordIndex;) {
- u4 bitPos = bitIndex & 0x1f;
- if (word == 0) {
- bitIndex += (32 - bitPos);
- word = storage[wordIndex++];
- continue;
- }
- for (; bitPos < 32; bitPos++) {
- if (word & (1 << bitPos)) {
- iterator->idx = bitIndex + 1;
- return bitIndex;
- }
- bitIndex++;
- }
- word = storage[wordIndex++];
+ for (; wordIndex <= endWordIndex;) {
+ u4 bitPos = bitIndex & 0x1f;
+ if (word == 0) {
+ bitIndex += (32 - bitPos);
+ word = storage[wordIndex++];
+ continue;
}
- iterator->idx = iterator->bitSize;
- return -1;
+ for (; bitPos < 32; bitPos++) {
+ if (word & (1 << bitPos)) {
+ iterator->idx = bitIndex + 1;
+ return bitIndex;
+ }
+ bitIndex++;
+ }
+ word = storage[wordIndex++];
+ }
+ iterator->idx = iterator->bitSize;
+ return -1;
}
/*
@@ -674,42 +672,42 @@
*/
void oatSetInitialBits(ArenaBitVector* pBits, unsigned int numBits)
{
- unsigned int idx;
- DCHECK_LE(((numBits + 31) >> 5), pBits->storageSize);
- for (idx = 0; idx < (numBits >> 5); idx++) {
- pBits->storage[idx] = -1;
- }
- unsigned int remNumBits = numBits & 0x1f;
- if (remNumBits) {
- pBits->storage[idx] = (1 << remNumBits) - 1;
- }
+ unsigned int idx;
+ DCHECK_LE(((numBits + 31) >> 5), pBits->storageSize);
+ for (idx = 0; idx < (numBits >> 5); idx++) {
+ pBits->storage[idx] = -1;
+ }
+ unsigned int remNumBits = numBits & 0x1f;
+ if (remNumBits) {
+ pBits->storage[idx] = (1 << remNumBits) - 1;
+ }
}
void oatGetBlockName(BasicBlock* bb, char* name)
{
- switch (bb->blockType) {
- case kEntryBlock:
- snprintf(name, BLOCK_NAME_LEN, "entry");
- break;
- case kExitBlock:
- snprintf(name, BLOCK_NAME_LEN, "exit");
- break;
- case kDalvikByteCode:
- snprintf(name, BLOCK_NAME_LEN, "block%04x", bb->startOffset);
- break;
- case kExceptionHandling:
- snprintf(name, BLOCK_NAME_LEN, "exception%04x", bb->startOffset);
- break;
- default:
- snprintf(name, BLOCK_NAME_LEN, "??");
- break;
- }
+ switch (bb->blockType) {
+ case kEntryBlock:
+ snprintf(name, BLOCK_NAME_LEN, "entry");
+ break;
+ case kExitBlock:
+ snprintf(name, BLOCK_NAME_LEN, "exit");
+ break;
+ case kDalvikByteCode:
+ snprintf(name, BLOCK_NAME_LEN, "block%04x", bb->startOffset);
+ break;
+ case kExceptionHandling:
+ snprintf(name, BLOCK_NAME_LEN, "exception%04x", bb->startOffset);
+ break;
+ default:
+ snprintf(name, BLOCK_NAME_LEN, "??");
+ break;
+ }
}
const char* oatGetShortyFromTargetIdx(CompilationUnit *cUnit, int targetIdx)
{
- const DexFile::MethodId& methodId = cUnit->dex_file->GetMethodId(targetIdx);
- return cUnit->dex_file->GetShorty(methodId.proto_idx_);
+ const DexFile::MethodId& methodId = cUnit->dex_file->GetMethodId(targetIdx);
+ return cUnit->dex_file->GetShorty(methodId.proto_idx_);
}
} // namespace art
diff --git a/src/compiler/codegen/CodegenFactory.cc b/src/compiler/codegen/CodegenFactory.cc
index 1b64af2..3a7cb82 100644
--- a/src/compiler/codegen/CodegenFactory.cc
+++ b/src/compiler/codegen/CodegenFactory.cc
@@ -33,25 +33,25 @@
*/
LIR* loadConstant(CompilationUnit* cUnit, int rDest, int value)
{
- if (oatIsTemp(cUnit, rDest)) {
- oatClobber(cUnit, rDest);
- oatMarkInUse(cUnit, rDest);
- }
- return loadConstantNoClobber(cUnit, rDest, value);
+ if (oatIsTemp(cUnit, rDest)) {
+ oatClobber(cUnit, rDest);
+ oatMarkInUse(cUnit, rDest);
+ }
+ return loadConstantNoClobber(cUnit, rDest, value);
}
/* Load a word at base + displacement. Displacement must be word multiple */
LIR* loadWordDisp(CompilationUnit* cUnit, int rBase, int displacement,
int rDest)
{
- return loadBaseDisp(cUnit, NULL, rBase, displacement, rDest, kWord,
- INVALID_SREG);
+ return loadBaseDisp(cUnit, NULL, rBase, displacement, rDest, kWord,
+ INVALID_SREG);
}
LIR* storeWordDisp(CompilationUnit* cUnit, int rBase, int displacement,
int rSrc)
{
- return storeBaseDisp(cUnit, rBase, displacement, rSrc, kWord);
+ return storeBaseDisp(cUnit, rBase, displacement, rSrc, kWord);
}
/*
@@ -61,14 +61,14 @@
*/
void loadValueDirect(CompilationUnit* cUnit, RegLocation rlSrc, int rDest)
{
- rlSrc = oatUpdateLoc(cUnit, rlSrc);
- if (rlSrc.location == kLocPhysReg) {
- opRegCopy(cUnit, rDest, rlSrc.lowReg);
- } else {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, rlSrc.sRegLow), rDest);
- }
+ rlSrc = oatUpdateLoc(cUnit, rlSrc);
+ if (rlSrc.location == kLocPhysReg) {
+ opRegCopy(cUnit, rDest, rlSrc.lowReg);
+ } else {
+ DCHECK((rlSrc.location == kLocDalvikFrame) ||
+ (rlSrc.location == kLocCompilerTemp));
+ loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, rlSrc.sRegLow), rDest);
+ }
}
/*
@@ -78,9 +78,9 @@
*/
void loadValueDirectFixed(CompilationUnit* cUnit, RegLocation rlSrc, int rDest)
{
- oatClobber(cUnit, rDest);
- oatMarkInUse(cUnit, rDest);
- loadValueDirect(cUnit, rlSrc, rDest);
+ oatClobber(cUnit, rDest);
+ oatMarkInUse(cUnit, rDest);
+ loadValueDirect(cUnit, rlSrc, rDest);
}
/*
@@ -89,18 +89,17 @@
* register liveness. That is the responsibility of the caller.
*/
void loadValueDirectWide(CompilationUnit* cUnit, RegLocation rlSrc, int regLo,
- int regHi)
+ int regHi)
{
- rlSrc = oatUpdateLocWide(cUnit, rlSrc);
- if (rlSrc.location == kLocPhysReg) {
- opRegCopyWide(cUnit, regLo, regHi, rlSrc.lowReg, rlSrc.highReg);
- } else {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- loadBaseDispWide(cUnit, NULL, rSP,
- oatSRegOffset(cUnit, rlSrc.sRegLow),
- regLo, regHi, INVALID_SREG);
- }
+ rlSrc = oatUpdateLocWide(cUnit, rlSrc);
+ if (rlSrc.location == kLocPhysReg) {
+ opRegCopyWide(cUnit, regLo, regHi, rlSrc.lowReg, rlSrc.highReg);
+ } else {
+ DCHECK((rlSrc.location == kLocDalvikFrame) ||
+ (rlSrc.location == kLocCompilerTemp));
+ loadBaseDispWide(cUnit, NULL, rSP, oatSRegOffset(cUnit, rlSrc.sRegLow),
+ regLo, regHi, INVALID_SREG);
+ }
}
/*
@@ -111,161 +110,159 @@
void loadValueDirectWideFixed(CompilationUnit* cUnit, RegLocation rlSrc,
int regLo, int regHi)
{
- oatClobber(cUnit, regLo);
- oatClobber(cUnit, regHi);
- oatMarkInUse(cUnit, regLo);
- oatMarkInUse(cUnit, regHi);
- loadValueDirectWide(cUnit, rlSrc, regLo, regHi);
+ oatClobber(cUnit, regLo);
+ oatClobber(cUnit, regHi);
+ oatMarkInUse(cUnit, regLo);
+ oatMarkInUse(cUnit, regHi);
+ loadValueDirectWide(cUnit, rlSrc, regLo, regHi);
}
RegLocation loadValue(CompilationUnit* cUnit, RegLocation rlSrc,
RegisterClass opKind)
{
- rlSrc = oatEvalLoc(cUnit, rlSrc, opKind, false);
- if (rlSrc.location != kLocPhysReg) {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- loadValueDirect(cUnit, rlSrc, rlSrc.lowReg);
- rlSrc.location = kLocPhysReg;
- oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
- }
- return rlSrc;
+ rlSrc = oatEvalLoc(cUnit, rlSrc, opKind, false);
+ if (rlSrc.location != kLocPhysReg) {
+ DCHECK((rlSrc.location == kLocDalvikFrame) ||
+ (rlSrc.location == kLocCompilerTemp));
+ loadValueDirect(cUnit, rlSrc, rlSrc.lowReg);
+ rlSrc.location = kLocPhysReg;
+ oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+ }
+ return rlSrc;
}
void storeValue(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
{
#ifndef NDEBUG
- /*
- * Sanity checking - should never try to store to the same
- * ssa name during the compilation of a single instruction
- * without an intervening oatClobberSReg().
- */
- DCHECK((cUnit->liveSReg == INVALID_SREG) ||
- (rlDest.sRegLow != cUnit->liveSReg));
- cUnit->liveSReg = rlDest.sRegLow;
+ /*
+ * Sanity checking - should never try to store to the same
+ * ssa name during the compilation of a single instruction
+ * without an intervening oatClobberSReg().
+ */
+ DCHECK((cUnit->liveSReg == INVALID_SREG) ||
+ (rlDest.sRegLow != cUnit->liveSReg));
+ cUnit->liveSReg = rlDest.sRegLow;
#endif
- LIR* defStart;
- LIR* defEnd;
- DCHECK(!rlDest.wide);
- DCHECK(!rlSrc.wide);
- rlSrc = oatUpdateLoc(cUnit, rlSrc);
- rlDest = oatUpdateLoc(cUnit, rlDest);
- if (rlSrc.location == kLocPhysReg) {
- if (oatIsLive(cUnit, rlSrc.lowReg) ||
- oatIsPromoted(cUnit, rlSrc.lowReg) ||
- (rlDest.location == kLocPhysReg)) {
- // Src is live/promoted or Dest has assigned reg.
- rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
- opRegCopy(cUnit, rlDest.lowReg, rlSrc.lowReg);
- } else {
- // Just re-assign the registers. Dest gets Src's regs
- rlDest.lowReg = rlSrc.lowReg;
- oatClobber(cUnit, rlSrc.lowReg);
- }
+ LIR* defStart;
+ LIR* defEnd;
+ DCHECK(!rlDest.wide);
+ DCHECK(!rlSrc.wide);
+ rlSrc = oatUpdateLoc(cUnit, rlSrc);
+ rlDest = oatUpdateLoc(cUnit, rlDest);
+ if (rlSrc.location == kLocPhysReg) {
+ if (oatIsLive(cUnit, rlSrc.lowReg) ||
+ oatIsPromoted(cUnit, rlSrc.lowReg) ||
+ (rlDest.location == kLocPhysReg)) {
+ // Src is live/promoted or Dest has assigned reg.
+ rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
+ opRegCopy(cUnit, rlDest.lowReg, rlSrc.lowReg);
} else {
- // Load Src either into promoted Dest or temps allocated for Dest
- rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
- loadValueDirect(cUnit, rlSrc, rlDest.lowReg);
+ // Just re-assign the registers. Dest gets Src's regs
+ rlDest.lowReg = rlSrc.lowReg;
+ oatClobber(cUnit, rlSrc.lowReg);
}
+ } else {
+ // Load Src either into promoted Dest or temps allocated for Dest
+ rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
+ loadValueDirect(cUnit, rlSrc, rlDest.lowReg);
+ }
- // Dest is now live and dirty (until/if we flush it to home location)
- oatMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
- oatMarkDirty(cUnit, rlDest);
+ // Dest is now live and dirty (until/if we flush it to home location)
+ oatMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
+ oatMarkDirty(cUnit, rlDest);
- oatResetDefLoc(cUnit, rlDest);
- if (oatIsDirty(cUnit, rlDest.lowReg) &&
- oatLiveOut(cUnit, rlDest.sRegLow)) {
- defStart = (LIR* )cUnit->lastLIRInsn;
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, rlDest.sRegLow),
- rlDest.lowReg, kWord);
- oatMarkClean(cUnit, rlDest);
- defEnd = (LIR* )cUnit->lastLIRInsn;
- oatMarkDef(cUnit, rlDest, defStart, defEnd);
- }
+ oatResetDefLoc(cUnit, rlDest);
+ if (oatIsDirty(cUnit, rlDest.lowReg) &&
+ oatLiveOut(cUnit, rlDest.sRegLow)) {
+ defStart = (LIR* )cUnit->lastLIRInsn;
+ storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, rlDest.sRegLow),
+ rlDest.lowReg, kWord);
+ oatMarkClean(cUnit, rlDest);
+ defEnd = (LIR* )cUnit->lastLIRInsn;
+ oatMarkDef(cUnit, rlDest, defStart, defEnd);
+ }
}
RegLocation loadValueWide(CompilationUnit* cUnit, RegLocation rlSrc,
- RegisterClass opKind)
+ RegisterClass opKind)
{
- DCHECK(rlSrc.wide);
- rlSrc = oatEvalLoc(cUnit, rlSrc, opKind, false);
- if (rlSrc.location != kLocPhysReg) {
- DCHECK((rlSrc.location == kLocDalvikFrame) ||
- (rlSrc.location == kLocCompilerTemp));
- loadValueDirectWide(cUnit, rlSrc, rlSrc.lowReg, rlSrc.highReg);
- rlSrc.location = kLocPhysReg;
- oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
- oatMarkLive(cUnit, rlSrc.highReg,
- oatSRegHi(rlSrc.sRegLow));
- }
- return rlSrc;
+ DCHECK(rlSrc.wide);
+ rlSrc = oatEvalLoc(cUnit, rlSrc, opKind, false);
+ if (rlSrc.location != kLocPhysReg) {
+ DCHECK((rlSrc.location == kLocDalvikFrame) ||
+ (rlSrc.location == kLocCompilerTemp));
+ loadValueDirectWide(cUnit, rlSrc, rlSrc.lowReg, rlSrc.highReg);
+ rlSrc.location = kLocPhysReg;
+ oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+ oatMarkLive(cUnit, rlSrc.highReg,
+ oatSRegHi(rlSrc.sRegLow));
+ }
+ return rlSrc;
}
void storeValueWide(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+ RegLocation rlSrc)
{
#ifndef NDEBUG
- /*
- * Sanity checking - should never try to store to the same
- * ssa name during the compilation of a single instruction
- * without an intervening oatClobberSReg().
- */
- DCHECK((cUnit->liveSReg == INVALID_SREG) ||
- (rlDest.sRegLow != cUnit->liveSReg));
- cUnit->liveSReg = rlDest.sRegLow;
+ /*
+ * Sanity checking - should never try to store to the same
+ * ssa name during the compilation of a single instruction
+ * without an intervening oatClobberSReg().
+ */
+ DCHECK((cUnit->liveSReg == INVALID_SREG) ||
+ (rlDest.sRegLow != cUnit->liveSReg));
+ cUnit->liveSReg = rlDest.sRegLow;
#endif
- LIR* defStart;
- LIR* defEnd;
- DCHECK_EQ(FPREG(rlSrc.lowReg), FPREG(rlSrc.highReg));
- DCHECK(rlDest.wide);
- DCHECK(rlSrc.wide);
- if (rlSrc.location == kLocPhysReg) {
- if (oatIsLive(cUnit, rlSrc.lowReg) ||
- oatIsLive(cUnit, rlSrc.highReg) ||
- oatIsPromoted(cUnit, rlSrc.lowReg) ||
- oatIsPromoted(cUnit, rlSrc.highReg) ||
- (rlDest.location == kLocPhysReg)) {
- // Src is live or promoted or Dest has assigned reg.
- rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
- opRegCopyWide(cUnit, rlDest.lowReg, rlDest.highReg,
- rlSrc.lowReg, rlSrc.highReg);
- } else {
- // Just re-assign the registers. Dest gets Src's regs
- rlDest.lowReg = rlSrc.lowReg;
- rlDest.highReg = rlSrc.highReg;
- oatClobber(cUnit, rlSrc.lowReg);
- oatClobber(cUnit, rlSrc.highReg);
- }
+ LIR* defStart;
+ LIR* defEnd;
+ DCHECK_EQ(FPREG(rlSrc.lowReg), FPREG(rlSrc.highReg));
+ DCHECK(rlDest.wide);
+ DCHECK(rlSrc.wide);
+ if (rlSrc.location == kLocPhysReg) {
+ if (oatIsLive(cUnit, rlSrc.lowReg) ||
+ oatIsLive(cUnit, rlSrc.highReg) ||
+ oatIsPromoted(cUnit, rlSrc.lowReg) ||
+ oatIsPromoted(cUnit, rlSrc.highReg) ||
+ (rlDest.location == kLocPhysReg)) {
+ // Src is live or promoted or Dest has assigned reg.
+ rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
+ opRegCopyWide(cUnit, rlDest.lowReg, rlDest.highReg,
+ rlSrc.lowReg, rlSrc.highReg);
} else {
- // Load Src either into promoted Dest or temps allocated for Dest
- rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
- loadValueDirectWide(cUnit, rlSrc, rlDest.lowReg,
- rlDest.highReg);
+ // Just re-assign the registers. Dest gets Src's regs
+ rlDest.lowReg = rlSrc.lowReg;
+ rlDest.highReg = rlSrc.highReg;
+ oatClobber(cUnit, rlSrc.lowReg);
+ oatClobber(cUnit, rlSrc.highReg);
}
+ } else {
+ // Load Src either into promoted Dest or temps allocated for Dest
+ rlDest = oatEvalLoc(cUnit, rlDest, kAnyReg, false);
+ loadValueDirectWide(cUnit, rlSrc, rlDest.lowReg, rlDest.highReg);
+ }
- // Dest is now live and dirty (until/if we flush it to home location)
- oatMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
- oatMarkLive(cUnit, rlDest.highReg,
- oatSRegHi(rlDest.sRegLow));
- oatMarkDirty(cUnit, rlDest);
- oatMarkPair(cUnit, rlDest.lowReg, rlDest.highReg);
+ // Dest is now live and dirty (until/if we flush it to home location)
+ oatMarkLive(cUnit, rlDest.lowReg, rlDest.sRegLow);
+ oatMarkLive(cUnit, rlDest.highReg, oatSRegHi(rlDest.sRegLow));
+ oatMarkDirty(cUnit, rlDest);
+ oatMarkPair(cUnit, rlDest.lowReg, rlDest.highReg);
- oatResetDefLocWide(cUnit, rlDest);
- if ((oatIsDirty(cUnit, rlDest.lowReg) ||
- oatIsDirty(cUnit, rlDest.highReg)) &&
- (oatLiveOut(cUnit, rlDest.sRegLow) ||
- oatLiveOut(cUnit, oatSRegHi(rlDest.sRegLow)))) {
- defStart = (LIR*)cUnit->lastLIRInsn;
- DCHECK_EQ((SRegToVReg(cUnit, rlDest.sRegLow)+1),
- SRegToVReg(cUnit, oatSRegHi(rlDest.sRegLow)));
- storeBaseDispWide(cUnit, rSP, oatSRegOffset(cUnit, rlDest.sRegLow),
- rlDest.lowReg, rlDest.highReg);
- oatMarkClean(cUnit, rlDest);
- defEnd = (LIR*)cUnit->lastLIRInsn;
- oatMarkDefWide(cUnit, rlDest, defStart, defEnd);
- }
+ oatResetDefLocWide(cUnit, rlDest);
+ if ((oatIsDirty(cUnit, rlDest.lowReg) ||
+ oatIsDirty(cUnit, rlDest.highReg)) &&
+ (oatLiveOut(cUnit, rlDest.sRegLow) ||
+ oatLiveOut(cUnit, oatSRegHi(rlDest.sRegLow)))) {
+ defStart = (LIR*)cUnit->lastLIRInsn;
+ DCHECK_EQ((SRegToVReg(cUnit, rlDest.sRegLow)+1),
+ SRegToVReg(cUnit, oatSRegHi(rlDest.sRegLow)));
+ storeBaseDispWide(cUnit, rSP, oatSRegOffset(cUnit, rlDest.sRegLow),
+ rlDest.lowReg, rlDest.highReg);
+ oatMarkClean(cUnit, rlDest);
+ defEnd = (LIR*)cUnit->lastLIRInsn;
+ oatMarkDefWide(cUnit, rlDest, defStart, defEnd);
+ }
}
/*
@@ -273,39 +270,39 @@
*/
void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
{
- int regCardBase = oatAllocTemp(cUnit);
- int regCardNo = oatAllocTemp(cUnit);
- LIR* branchOver = opCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
+ int regCardBase = oatAllocTemp(cUnit);
+ int regCardNo = oatAllocTemp(cUnit);
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
#if !defined(TARGET_X86)
- loadWordDisp(cUnit, rSELF, Thread::CardTableOffset().Int32Value(),
- regCardBase);
+ loadWordDisp(cUnit, rSELF, Thread::CardTableOffset().Int32Value(),
+ regCardBase);
#else
- newLIR2(cUnit, kX86Mov32RT, regCardBase,
- Thread::CardTableOffset().Int32Value());
+ newLIR2(cUnit, kX86Mov32RT, regCardBase,
+ Thread::CardTableOffset().Int32Value());
#endif
- opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
- storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
- kUnsignedByte);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
- oatFreeTemp(cUnit, regCardBase);
- oatFreeTemp(cUnit, regCardNo);
+ opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
+ storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ kUnsignedByte);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
+ oatFreeTemp(cUnit, regCardBase);
+ oatFreeTemp(cUnit, regCardNo);
}
/* Utilities to load the current Method* */
void loadCurrMethodDirect(CompilationUnit *cUnit, int rTgt)
{
- loadValueDirectFixed(cUnit, cUnit->regLocation[cUnit->methodSReg], rTgt);
+ loadValueDirectFixed(cUnit, cUnit->regLocation[cUnit->methodSReg], rTgt);
}
RegLocation loadCurrMethod(CompilationUnit *cUnit)
{
- return loadValue(cUnit, cUnit->regLocation[cUnit->methodSReg], kCoreReg);
+ return loadValue(cUnit, cUnit->regLocation[cUnit->methodSReg], kCoreReg);
}
bool methodStarInReg(CompilationUnit* cUnit)
{
- return (cUnit->regLocation[cUnit->methodSReg].location == kLocPhysReg);
+ return (cUnit->regLocation[cUnit->methodSReg].location == kLocPhysReg);
}
diff --git a/src/compiler/codegen/CodegenUtil.cc b/src/compiler/codegen/CodegenUtil.cc
index 777cf57..11982ba 100644
--- a/src/compiler/codegen/CodegenUtil.cc
+++ b/src/compiler/codegen/CodegenUtil.cc
@@ -18,36 +18,36 @@
void setMemRefType(LIR* lir, bool isLoad, int memType)
{
- u8 *maskPtr;
- u8 mask = ENCODE_MEM;;
- DCHECK(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
- if (isLoad) {
- maskPtr = &lir->useMask;
- } else {
- maskPtr = &lir->defMask;
- }
- /* Clear out the memref flags */
- *maskPtr &= ~mask;
- /* ..and then add back the one we need */
- switch (memType) {
- case kLiteral:
- DCHECK(isLoad);
- *maskPtr |= ENCODE_LITERAL;
- break;
- case kDalvikReg:
- *maskPtr |= ENCODE_DALVIK_REG;
- break;
- case kHeapRef:
- *maskPtr |= ENCODE_HEAP_REF;
- break;
- case kMustNotAlias:
- /* Currently only loads can be marked as kMustNotAlias */
- DCHECK(!(EncodingMap[lir->opcode].flags & IS_STORE));
- *maskPtr |= ENCODE_MUST_NOT_ALIAS;
- break;
- default:
- LOG(FATAL) << "Oat: invalid memref kind - " << memType;
- }
+ u8 *maskPtr;
+ u8 mask = ENCODE_MEM;;
+ DCHECK(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
+ if (isLoad) {
+ maskPtr = &lir->useMask;
+ } else {
+ maskPtr = &lir->defMask;
+ }
+ /* Clear out the memref flags */
+ *maskPtr &= ~mask;
+ /* ..and then add back the one we need */
+ switch (memType) {
+ case kLiteral:
+ DCHECK(isLoad);
+ *maskPtr |= ENCODE_LITERAL;
+ break;
+ case kDalvikReg:
+ *maskPtr |= ENCODE_DALVIK_REG;
+ break;
+ case kHeapRef:
+ *maskPtr |= ENCODE_HEAP_REF;
+ break;
+ case kMustNotAlias:
+ /* Currently only loads can be marked as kMustNotAlias */
+ DCHECK(!(EncodingMap[lir->opcode].flags & IS_STORE));
+ *maskPtr |= ENCODE_MUST_NOT_ALIAS;
+ break;
+ default:
+ LOG(FATAL) << "Oat: invalid memref kind - " << memType;
+ }
}
/*
@@ -55,16 +55,16 @@
*/
void annotateDalvikRegAccess(LIR* lir, int regId, bool isLoad, bool is64bit)
{
- setMemRefType(lir, isLoad, kDalvikReg);
+ setMemRefType(lir, isLoad, kDalvikReg);
- /*
- * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a 64-bit
- * access.
- */
- lir->aliasInfo = regId;
- if (is64bit) {
- lir->aliasInfo |= 0x80000000;
- }
+ /*
+ * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a 64-bit
+ * access.
+ */
+ lir->aliasInfo = regId;
+ if (is64bit) {
+ lir->aliasInfo |= 0x80000000;
+ }
}
/*
@@ -72,19 +72,19 @@
*/
inline u8 getRegMaskCommon(int reg)
{
- u8 seed;
- int shift;
- int regId = reg & 0x1f;
+ u8 seed;
+ int shift;
+ int regId = reg & 0x1f;
- /*
- * Each double register is equal to a pair of single-precision FP registers
- */
- seed = DOUBLEREG(reg) ? 3 : 1;
- /* FP register starts at bit position 16 */
- shift = FPREG(reg) ? kFPReg0 : 0;
- /* Expand the double register id into single offset */
- shift += regId;
- return (seed << shift);
+ /*
+ * Each double register is equal to a pair of single-precision FP registers
+ */
+ seed = DOUBLEREG(reg) ? 3 : 1;
+ /* FP register starts at bit position 16 */
+ shift = FPREG(reg) ? kFPReg0 : 0;
+ /* Expand the double register id into single offset */
+ shift += regId;
+ return (seed << shift);
}
/*
@@ -92,7 +92,7 @@
*/
inline void setupRegMask(u8* mask, int reg)
{
- *mask |= getRegMaskCommon(reg);
+ *mask |= getRegMaskCommon(reg);
}
/*
@@ -100,143 +100,143 @@
*/
void setupResourceMasks(LIR* lir)
{
- int opcode = lir->opcode;
- int flags;
+ int opcode = lir->opcode;
+ int flags;
- if (opcode <= 0) {
- lir->useMask = lir->defMask = 0;
- return;
- }
+ if (opcode <= 0) {
+ lir->useMask = lir->defMask = 0;
+ return;
+ }
- flags = EncodingMap[lir->opcode].flags;
+ flags = EncodingMap[lir->opcode].flags;
- if (flags & NEEDS_FIXUP) {
- lir->flags.pcRelFixup = true;
- }
+ if (flags & NEEDS_FIXUP) {
+ lir->flags.pcRelFixup = true;
+ }
- /* Get the starting size of the instruction's template */
- lir->flags.size = oatGetInsnSize(lir);
+ /* Get the starting size of the instruction's template */
+ lir->flags.size = oatGetInsnSize(lir);
- /* Set up the mask for resources that are updated */
- if (flags & (IS_LOAD | IS_STORE)) {
- /* Default to heap - will catch specialized classes later */
- setMemRefType(lir, flags & IS_LOAD, kHeapRef);
- }
+ /* Set up the mask for resources that are updated */
+ if (flags & (IS_LOAD | IS_STORE)) {
+ /* Default to heap - will catch specialized classes later */
+ setMemRefType(lir, flags & IS_LOAD, kHeapRef);
+ }
- /*
- * Conservatively assume the branch here will call out a function that in
- * turn will trash everything.
- */
- if (flags & IS_BRANCH) {
- lir->defMask = lir->useMask = ENCODE_ALL;
- return;
- }
+ /*
+ * Conservatively assume the branch here will call out a function that in
+ * turn will trash everything.
+ */
+ if (flags & IS_BRANCH) {
+ lir->defMask = lir->useMask = ENCODE_ALL;
+ return;
+ }
- if (flags & REG_DEF0) {
- setupRegMask(&lir->defMask, lir->operands[0]);
- }
+ if (flags & REG_DEF0) {
+ setupRegMask(&lir->defMask, lir->operands[0]);
+ }
- if (flags & REG_DEF1) {
- setupRegMask(&lir->defMask, lir->operands[1]);
- }
+ if (flags & REG_DEF1) {
+ setupRegMask(&lir->defMask, lir->operands[1]);
+ }
- if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_REG_SP;
- }
+ if (flags & REG_DEF_SP) {
+ lir->defMask |= ENCODE_REG_SP;
+ }
#if !defined(TARGET_X86)
- if (flags & REG_DEF_LR) {
- lir->defMask |= ENCODE_REG_LR;
- }
+ if (flags & REG_DEF_LR) {
+ lir->defMask |= ENCODE_REG_LR;
+ }
#endif
- if (flags & REG_DEF_LIST0) {
- lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
- }
+ if (flags & REG_DEF_LIST0) {
+ lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
+ }
- if (flags & REG_DEF_LIST1) {
- lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
- }
+ if (flags & REG_DEF_LIST1) {
+ lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
+ }
#if defined(TARGET_ARM)
- if (flags & REG_DEF_FPCS_LIST0) {
- lir->defMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
- }
+ if (flags & REG_DEF_FPCS_LIST0) {
+ lir->defMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
+ }
- if (flags & REG_DEF_FPCS_LIST2) {
- for (int i = 0; i < lir->operands[2]; i++) {
- setupRegMask(&lir->defMask, lir->operands[1] + i);
- }
+ if (flags & REG_DEF_FPCS_LIST2) {
+ for (int i = 0; i < lir->operands[2]; i++) {
+ setupRegMask(&lir->defMask, lir->operands[1] + i);
}
+ }
#endif
- if (flags & SETS_CCODES) {
- lir->defMask |= ENCODE_CCODE;
- }
+ if (flags & SETS_CCODES) {
+ lir->defMask |= ENCODE_CCODE;
+ }
#if defined(TARGET_ARM)
- /* Conservatively treat the IT block */
- if (flags & IS_IT) {
- lir->defMask = ENCODE_ALL;
- }
+ /* Conservatively treat the IT block */
+ if (flags & IS_IT) {
+ lir->defMask = ENCODE_ALL;
+ }
#endif
- if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
- int i;
+ if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+ int i;
- for (i = 0; i < 4; i++) {
- if (flags & (1 << (kRegUse0 + i))) {
- setupRegMask(&lir->useMask, lir->operands[i]);
- }
- }
+ for (i = 0; i < 4; i++) {
+ if (flags & (1 << (kRegUse0 + i))) {
+ setupRegMask(&lir->useMask, lir->operands[i]);
+ }
}
+ }
#if defined(TARGET_ARM)
- if (flags & REG_USE_PC) {
- lir->useMask |= ENCODE_REG_PC;
- }
+ if (flags & REG_USE_PC) {
+ lir->useMask |= ENCODE_REG_PC;
+ }
#endif
- if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_REG_SP;
- }
+ if (flags & REG_USE_SP) {
+ lir->useMask |= ENCODE_REG_SP;
+ }
- if (flags & REG_USE_LIST0) {
- lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
- }
+ if (flags & REG_USE_LIST0) {
+ lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
+ }
- if (flags & REG_USE_LIST1) {
- lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
- }
+ if (flags & REG_USE_LIST1) {
+ lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
+ }
#if defined(TARGET_ARM)
- if (flags & REG_USE_FPCS_LIST0) {
- lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
- }
+ if (flags & REG_USE_FPCS_LIST0) {
+ lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
+ }
- if (flags & REG_USE_FPCS_LIST2) {
- for (int i = 0; i < lir->operands[2]; i++) {
- setupRegMask(&lir->useMask, lir->operands[1] + i);
- }
+ if (flags & REG_USE_FPCS_LIST2) {
+ for (int i = 0; i < lir->operands[2]; i++) {
+ setupRegMask(&lir->useMask, lir->operands[1] + i);
}
+ }
#endif
- if (flags & USES_CCODES) {
- lir->useMask |= ENCODE_CCODE;
- }
+ if (flags & USES_CCODES) {
+ lir->useMask |= ENCODE_CCODE;
+ }
#if defined(TARGET_ARM)
- /* Fixup for kThumbPush/lr and kThumbPop/pc */
- if (opcode == kThumbPush || opcode == kThumbPop) {
- u8 r8Mask = getRegMaskCommon(r8);
- if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
- lir->useMask &= ~r8Mask;
- lir->useMask |= ENCODE_REG_LR;
- } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
- lir->defMask &= ~r8Mask;
- lir->defMask |= ENCODE_REG_PC;
- }
+ /* Fixup for kThumbPush/lr and kThumbPop/pc */
+ if (opcode == kThumbPush || opcode == kThumbPop) {
+ u8 r8Mask = getRegMaskCommon(r8);
+ if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
+ lir->useMask &= ~r8Mask;
+ lir->useMask |= ENCODE_REG_LR;
+ } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
+ lir->defMask &= ~r8Mask;
+ lir->defMask |= ENCODE_REG_PC;
}
+ }
#endif
}
@@ -249,189 +249,196 @@
/* Pretty-print a LIR instruction */
void oatDumpLIRInsn(CompilationUnit* cUnit, LIR* arg, unsigned char* baseAddr)
{
- LIR* lir = (LIR*) arg;
- int offset = lir->offset;
- int dest = lir->operands[0];
- const bool dumpNop = (cUnit->enableDebug & (1 << kDebugShowNops));
+ LIR* lir = (LIR*) arg;
+ int offset = lir->offset;
+ int dest = lir->operands[0];
+ const bool dumpNop = (cUnit->enableDebug & (1 << kDebugShowNops));
- /* Handle pseudo-ops individually, and all regular insns as a group */
- switch (lir->opcode) {
- case kPseudoMethodEntry:
- LOG(INFO) << "-------- method entry " <<
- PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- break;
- case kPseudoMethodExit:
- LOG(INFO) << "-------- Method_Exit";
- break;
- case kPseudoBarrier:
- LOG(INFO) << "-------- BARRIER";
- break;
- case kPseudoExtended:
- LOG(INFO) << "-------- " << (char* ) dest;
- break;
- case kPseudoSSARep:
- DUMP_SSA_REP(LOG(INFO) << "-------- kMirOpPhi: " << (char* ) dest);
- break;
- case kPseudoEntryBlock:
- LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
- break;
- case kPseudoDalvikByteCodeBoundary:
- LOG(INFO) << "-------- dalvik offset: 0x" << std::hex <<
- lir->dalvikOffset << " @ " << (char* )lir->operands[0];
- break;
- case kPseudoExitBlock:
- LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
- break;
- case kPseudoPseudoAlign4:
- LOG(INFO) << (intptr_t)baseAddr + offset << " (0x" << std::hex <<
- offset << "): .align4";
- break;
- case kPseudoEHBlockLabel:
- LOG(INFO) << "Exception_Handling:";
- break;
- case kPseudoTargetLabel:
- case kPseudoNormalBlockLabel:
- LOG(INFO) << "L" << (void*)lir << ":";
- break;
- case kPseudoThrowTarget:
- LOG(INFO) << "LT" << (void*)lir << ":";
- break;
- case kPseudoIntrinsicRetry:
- LOG(INFO) << "IR" << (void*)lir << ":";
- break;
- case kPseudoSuspendTarget:
- LOG(INFO) << "LS" << (void*)lir << ":";
- break;
- case kPseudoCaseLabel:
- LOG(INFO) << "LC" << (void*)lir << ": Case target 0x" <<
- std::hex << lir->operands[0] << "|" << std::dec <<
- lir->operands[0];
- break;
- default:
- if (lir->flags.isNop && !dumpNop) {
- break;
- } else {
- std::string op_name(buildInsnString(EncodingMap[lir->opcode].name, lir, baseAddr));
- std::string op_operands(buildInsnString(EncodingMap[lir->opcode].fmt, lir, baseAddr));
- LOG(INFO) << StringPrintf("%05x: %-9s%s%s", (unsigned int)(baseAddr + offset),
- op_name.c_str(), op_operands.c_str(), lir->flags.isNop ? "(nop)" : "");
- }
- break;
- }
+ /* Handle pseudo-ops individually, and all regular insns as a group */
+ switch (lir->opcode) {
+ case kPseudoMethodEntry:
+ LOG(INFO) << "-------- method entry "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ break;
+ case kPseudoMethodExit:
+ LOG(INFO) << "-------- Method_Exit";
+ break;
+ case kPseudoBarrier:
+ LOG(INFO) << "-------- BARRIER";
+ break;
+ case kPseudoExtended:
+ LOG(INFO) << "-------- " << (char* ) dest;
+ break;
+ case kPseudoSSARep:
+ DUMP_SSA_REP(LOG(INFO) << "-------- kMirOpPhi: " << (char* ) dest);
+ break;
+ case kPseudoEntryBlock:
+ LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
+ break;
+ case kPseudoDalvikByteCodeBoundary:
+ LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
+ << lir->dalvikOffset << " @ " << (char* )lir->operands[0];
+ break;
+ case kPseudoExitBlock:
+ LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
+ break;
+ case kPseudoPseudoAlign4:
+ LOG(INFO) << (intptr_t)baseAddr + offset << " (0x" << std::hex
+ << offset << "): .align4";
+ break;
+ case kPseudoEHBlockLabel:
+ LOG(INFO) << "Exception_Handling:";
+ break;
+ case kPseudoTargetLabel:
+ case kPseudoNormalBlockLabel:
+ LOG(INFO) << "L" << (void*)lir << ":";
+ break;
+ case kPseudoThrowTarget:
+ LOG(INFO) << "LT" << (void*)lir << ":";
+ break;
+ case kPseudoIntrinsicRetry:
+ LOG(INFO) << "IR" << (void*)lir << ":";
+ break;
+ case kPseudoSuspendTarget:
+ LOG(INFO) << "LS" << (void*)lir << ":";
+ break;
+ case kPseudoCaseLabel:
+ LOG(INFO) << "LC" << (void*)lir << ": Case target 0x"
+ << std::hex << lir->operands[0] << "|" << std::dec <<
+ lir->operands[0];
+ break;
+ default:
+ if (lir->flags.isNop && !dumpNop) {
+ break;
+ } else {
+ std::string op_name(buildInsnString(EncodingMap[lir->opcode].name,
+ lir, baseAddr));
+ std::string op_operands(buildInsnString(EncodingMap[lir->opcode].fmt
+ , lir, baseAddr));
+ LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
+ (unsigned int)(baseAddr + offset),
+ op_name.c_str(), op_operands.c_str(),
+ lir->flags.isNop ? "(nop)" : "");
+ }
+ break;
+ }
- if (lir->useMask && (!lir->flags.isNop || dumpNop)) {
- DUMP_RESOURCE_MASK(oatDumpResourceMask((LIR* ) lir,
- lir->useMask, "use"));
- }
- if (lir->defMask && (!lir->flags.isNop || dumpNop)) {
- DUMP_RESOURCE_MASK(oatDumpResourceMask((LIR* ) lir,
- lir->defMask, "def"));
- }
+ if (lir->useMask && (!lir->flags.isNop || dumpNop)) {
+ DUMP_RESOURCE_MASK(oatDumpResourceMask((LIR* ) lir, lir->useMask, "use"));
+ }
+ if (lir->defMask && (!lir->flags.isNop || dumpNop)) {
+ DUMP_RESOURCE_MASK(oatDumpResourceMask((LIR* ) lir, lir->defMask, "def"));
+ }
}
void oatDumpPromotionMap(CompilationUnit *cUnit)
{
- int numRegs = cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
- for (int i = 0; i < numRegs; i++) {
- PromotionMap vRegMap = cUnit->promotionMap[i];
- std::string buf;
- if (vRegMap.fpLocation == kLocPhysReg) {
- StringAppendF(&buf, " : s%d", vRegMap.fpReg & FP_REG_MASK);
- }
-
- std::string buf3;
- if (i < cUnit->numDalvikRegisters) {
- StringAppendF(&buf3, "%02d", i);
- } else if (i == cUnit->methodSReg) {
- buf3 = "Method*";
- } else {
- StringAppendF(&buf3, "ct%d", i - cUnit->numDalvikRegisters);
- }
-
- LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
- vRegMap.coreLocation == kLocPhysReg ?
- "r" : "SP+", vRegMap.coreLocation == kLocPhysReg ?
- vRegMap.coreReg : oatSRegOffset(cUnit, i), buf.c_str());
+ int numRegs = cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
+ for (int i = 0; i < numRegs; i++) {
+ PromotionMap vRegMap = cUnit->promotionMap[i];
+ std::string buf;
+ if (vRegMap.fpLocation == kLocPhysReg) {
+ StringAppendF(&buf, " : s%d", vRegMap.fpReg & FP_REG_MASK);
}
+
+ std::string buf3;
+ if (i < cUnit->numDalvikRegisters) {
+ StringAppendF(&buf3, "%02d", i);
+ } else if (i == cUnit->methodSReg) {
+ buf3 = "Method*";
+ } else {
+ StringAppendF(&buf3, "ct%d", i - cUnit->numDalvikRegisters);
+ }
+
+ LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
+ vRegMap.coreLocation == kLocPhysReg ?
+ "r" : "SP+", vRegMap.coreLocation == kLocPhysReg ?
+ vRegMap.coreReg : oatSRegOffset(cUnit, i),
+ buf.c_str());
+ }
}
/* Dump instructions and constant pool contents */
void oatCodegenDump(CompilationUnit* cUnit)
{
- LOG(INFO) << "/*";
- LOG(INFO) << "Dumping LIR insns for "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- LIR* lirInsn;
- LIR* thisLIR;
- int insnsSize = cUnit->insnsSize;
+ LOG(INFO) << "/*";
+ LOG(INFO) << "Dumping LIR insns for "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ LIR* lirInsn;
+ LIR* thisLIR;
+ int insnsSize = cUnit->insnsSize;
- LOG(INFO) << "Regs (excluding ins) : " << cUnit->numRegs;
- LOG(INFO) << "Ins : " << cUnit->numIns;
- LOG(INFO) << "Outs : " << cUnit->numOuts;
- LOG(INFO) << "CoreSpills : " << cUnit->numCoreSpills;
- LOG(INFO) << "FPSpills : " << cUnit->numFPSpills;
- LOG(INFO) << "CompilerTemps : " << cUnit->numCompilerTemps;
- LOG(INFO) << "Frame size : " << cUnit->frameSize;
- LOG(INFO) << "code size is " << cUnit->totalSize <<
- " bytes, Dalvik size is " << insnsSize * 2;
- LOG(INFO) << "expansion factor: " <<
- (float)cUnit->totalSize / (float)(insnsSize * 2);
- oatDumpPromotionMap(cUnit);
- for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
- oatDumpLIRInsn(cUnit, lirInsn, 0);
- }
- for (lirInsn = cUnit->classPointerList; lirInsn; lirInsn = lirInsn->next) {
- thisLIR = (LIR*) lirInsn;
- LOG(INFO) << StringPrintf("%x (%04x): .class (%s)",
- thisLIR->offset, thisLIR->offset,
- ((CallsiteInfo *) thisLIR->operands[0])->classDescriptor);
- }
- for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
- thisLIR = (LIR*) lirInsn;
- LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)",
- thisLIR->offset, thisLIR->offset, thisLIR->operands[0]);
- }
+ LOG(INFO) << "Regs (excluding ins) : " << cUnit->numRegs;
+ LOG(INFO) << "Ins : " << cUnit->numIns;
+ LOG(INFO) << "Outs : " << cUnit->numOuts;
+ LOG(INFO) << "CoreSpills : " << cUnit->numCoreSpills;
+ LOG(INFO) << "FPSpills : " << cUnit->numFPSpills;
+ LOG(INFO) << "CompilerTemps : " << cUnit->numCompilerTemps;
+ LOG(INFO) << "Frame size : " << cUnit->frameSize;
+ LOG(INFO) << "code size is " << cUnit->totalSize <<
+ " bytes, Dalvik size is " << insnsSize * 2;
+ LOG(INFO) << "expansion factor: "
+ << (float)cUnit->totalSize / (float)(insnsSize * 2);
+ oatDumpPromotionMap(cUnit);
+ for (lirInsn = cUnit->firstLIRInsn; lirInsn; lirInsn = lirInsn->next) {
+ oatDumpLIRInsn(cUnit, lirInsn, 0);
+ }
+ for (lirInsn = cUnit->classPointerList; lirInsn; lirInsn = lirInsn->next) {
+ thisLIR = (LIR*) lirInsn;
+ LOG(INFO) << StringPrintf("%x (%04x): .class (%s)",
+ thisLIR->offset, thisLIR->offset,
+ ((CallsiteInfo *)
+ thisLIR->operands[0])->classDescriptor);
+ }
+ for (lirInsn = cUnit->literalList; lirInsn; lirInsn = lirInsn->next) {
+ thisLIR = (LIR*) lirInsn;
+ LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)",
+ thisLIR->offset, thisLIR->offset,
+ thisLIR->operands[0]);
+ }
- const DexFile::MethodId& method_id =
- cUnit->dex_file->GetMethodId(cUnit->method_idx);
- std::string signature(cUnit->dex_file->GetMethodSignature(method_id));
- std::string name(cUnit->dex_file->GetMethodName(method_id));
- std::string descriptor(cUnit->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+ const DexFile::MethodId& method_id =
+ cUnit->dex_file->GetMethodId(cUnit->method_idx);
+ std::string signature(cUnit->dex_file->GetMethodSignature(method_id));
+ std::string name(cUnit->dex_file->GetMethodName(method_id));
+ std::string descriptor(cUnit->dex_file->GetMethodDeclaringClassDescriptor(method_id));
- // Dump mapping table
- if (cUnit->mappingTable.size() > 0) {
- std::string line(StringPrintf("\n MappingTable %s%s_%s_mappingTable[%zu] = {",
- descriptor.c_str(), name.c_str(), signature.c_str(), cUnit->mappingTable.size()));
- std::replace(line.begin(), line.end(), ';', '_');
- LOG(INFO) << line;
- for (uint32_t i = 0; i < cUnit->mappingTable.size(); i+=2) {
- line = StringPrintf(" {0x%05x, 0x%04x},",
- cUnit->mappingTable[i], cUnit->mappingTable[i+1]);
- LOG(INFO) << line;
- }
- LOG(INFO) <<" };\n\n";
+ // Dump mapping table
+ if (cUnit->mappingTable.size() > 0) {
+ std::string
+ line(StringPrintf("\n MappingTable %s%s_%s_mappingTable[%zu] = {",
+ descriptor.c_str(), name.c_str(), signature.c_str(),
+ cUnit->mappingTable.size()));
+ std::replace(line.begin(), line.end(), ';', '_');
+ LOG(INFO) << line;
+ for (uint32_t i = 0; i < cUnit->mappingTable.size(); i+=2) {
+ line = StringPrintf(" {0x%05x, 0x%04x},",
+ cUnit->mappingTable[i], cUnit->mappingTable[i+1]);
+ LOG(INFO) << line;
}
+ LOG(INFO) <<" };\n\n";
+ }
}
LIR* rawLIR(CompilationUnit* cUnit, int dalvikOffset, int opcode, int op0,
- int op1, int op2, int op3, int op4, LIR* target)
+ int op1, int op2, int op3, int op4, LIR* target)
{
- LIR* insn = (LIR* ) oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
- insn->dalvikOffset = dalvikOffset;
- insn->opcode = opcode;
- insn->operands[0] = op0;
- insn->operands[1] = op1;
- insn->operands[2] = op2;
- insn->operands[3] = op3;
- insn->operands[4] = op4;
- insn->target = target;
- oatSetupResourceMasks(insn);
- if (opcode == kPseudoTargetLabel) {
- // Always make labels scheduling barriers
- insn->defMask = ENCODE_ALL;
- }
- return insn;
+ LIR* insn = (LIR* ) oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
+ insn->dalvikOffset = dalvikOffset;
+ insn->opcode = opcode;
+ insn->operands[0] = op0;
+ insn->operands[1] = op1;
+ insn->operands[2] = op2;
+ insn->operands[3] = op3;
+ insn->operands[4] = op4;
+ insn->target = target;
+ oatSetupResourceMasks(insn);
+ if (opcode == kPseudoTargetLabel) {
+ // Always make labels scheduling barriers
+ insn->defMask = ENCODE_ALL;
+ }
+ return insn;
}
/*
@@ -440,76 +447,76 @@
*/
LIR* newLIR0(CompilationUnit* cUnit, int opcode)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
LIR* newLIR1(CompilationUnit* cUnit, int opcode,
- int dest)
+ int dest)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
LIR* newLIR2(CompilationUnit* cUnit, int opcode,
- int dest, int src1)
+ int dest, int src1)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_BINARY_OP))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_BINARY_OP))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
LIR* newLIR3(CompilationUnit* cUnit, int opcode,
- int dest, int src1, int src2)
+ int dest, int src1, int src2)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_TERTIARY_OP))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
- src2);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_TERTIARY_OP))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
+ src2);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
LIR* newLIR4(CompilationUnit* cUnit, int opcode,
- int dest, int src1, int src2, int info)
+ int dest, int src1, int src2, int info)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_QUAD_OP))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
- src2, info);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_QUAD_OP))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
+ src2, info);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
LIR* newLIR5(CompilationUnit* cUnit, int opcode,
- int dest, int src1, int src2, int info1, int info2)
+ int dest, int src1, int src2, int info1, int info2)
{
- DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_QUIN_OP))
- << EncodingMap[opcode].name << " " << (int)opcode << " "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
- << cUnit->currentDalvikOffset;
- LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
- src2, info1, info2);
- oatAppendLIR(cUnit, (LIR*) insn);
- return insn;
+ DCHECK(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_QUIN_OP))
+ << EncodingMap[opcode].name << " " << (int)opcode << " "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file) << " "
+ << cUnit->currentDalvikOffset;
+ LIR* insn = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, dest, src1,
+ src2, info1, info2);
+ oatAppendLIR(cUnit, (LIR*) insn);
+ return insn;
}
/*
@@ -518,32 +525,31 @@
*/
LIR* scanLiteralPool(LIR* dataTarget, int value, unsigned int delta)
{
- while (dataTarget) {
- if (((unsigned) (value - ((LIR* ) dataTarget)->operands[0])) <=
- delta)
- return (LIR* ) dataTarget;
- dataTarget = dataTarget->next;
- }
- return NULL;
+ while (dataTarget) {
+ if (((unsigned) (value - ((LIR* ) dataTarget)->operands[0])) <= delta)
+ return (LIR* ) dataTarget;
+ dataTarget = dataTarget->next;
+ }
+ return NULL;
}
/* Search the existing constants in the literal pool for an exact wide match */
LIR* scanLiteralPoolWide(LIR* dataTarget, int valLo, int valHi)
{
- bool loMatch = false;
- LIR* loTarget = NULL;
- while (dataTarget) {
- if (loMatch && (((LIR*)dataTarget)->operands[0] == valHi)) {
- return (LIR*)loTarget;
- }
- loMatch = false;
- if (((LIR*)dataTarget)->operands[0] == valLo) {
- loMatch = true;
- loTarget = dataTarget;
- }
- dataTarget = dataTarget->next;
+ bool loMatch = false;
+ LIR* loTarget = NULL;
+ while (dataTarget) {
+ if (loMatch && (((LIR*)dataTarget)->operands[0] == valHi)) {
+ return (LIR*)loTarget;
}
- return NULL;
+ loMatch = false;
+ if (((LIR*)dataTarget)->operands[0] == valLo) {
+ loMatch = true;
+ loTarget = dataTarget;
+ }
+ dataTarget = dataTarget->next;
+ }
+ return NULL;
}
/*
@@ -554,39 +560,38 @@
/* Add a 32-bit constant either in the constant pool */
LIR* addWordData(CompilationUnit* cUnit, LIR* *constantListP, int value)
{
- /* Add the constant to the literal pool */
- if (constantListP) {
- LIR* newValue = (LIR* ) oatNew(cUnit, sizeof(LIR), true,
- kAllocData);
- newValue->operands[0] = value;
- newValue->next = *constantListP;
- *constantListP = (LIR*) newValue;
- return newValue;
- }
- return NULL;
+ /* Add the constant to the literal pool */
+ if (constantListP) {
+ LIR* newValue = (LIR* ) oatNew(cUnit, sizeof(LIR), true, kAllocData);
+ newValue->operands[0] = value;
+ newValue->next = *constantListP;
+ *constantListP = (LIR*) newValue;
+ return newValue;
+ }
+ return NULL;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* addWideData(CompilationUnit* cUnit, LIR* *constantListP,
- int valLo, int valHi)
+ int valLo, int valHi)
{
- //FIXME: hard-coded little endian, need BE variant
- // Insert high word into list first
- addWordData(cUnit, constantListP, valHi);
- return addWordData(cUnit, constantListP, valLo);
+ //FIXME: hard-coded little endian, need BE variant
+ // Insert high word into list first
+ addWordData(cUnit, constantListP, valHi);
+ return addWordData(cUnit, constantListP, valLo);
}
void pushWord(std::vector<uint8_t>&buf, int data) {
- buf.push_back( data & 0xff);
- buf.push_back( (data >> 8) & 0xff);
- buf.push_back( (data >> 16) & 0xff);
- buf.push_back( (data >> 24) & 0xff);
+ buf.push_back( data & 0xff);
+ buf.push_back( (data >> 8) & 0xff);
+ buf.push_back( (data >> 16) & 0xff);
+ buf.push_back( (data >> 24) & 0xff);
}
void alignBuffer(std::vector<uint8_t>&buf, size_t offset) {
- while (buf.size() < offset) {
- buf.push_back(0);
- }
+ while (buf.size() < offset) {
+ buf.push_back(0);
+ }
}
bool IsDirect(int invokeType) {
@@ -597,188 +602,189 @@
/* Write the literal pool to the output stream */
void installLiteralPools(CompilationUnit* cUnit)
{
- alignBuffer(cUnit->codeBuffer, cUnit->dataOffset);
- LIR* dataLIR = cUnit->literalList;
+ alignBuffer(cUnit->codeBuffer, cUnit->dataOffset);
+ LIR* dataLIR = cUnit->literalList;
+ while (dataLIR != NULL) {
+ pushWord(cUnit->codeBuffer, dataLIR->operands[0]);
+ dataLIR = NEXT_LIR(dataLIR);
+ }
+ // Push code and method literals, record offsets for the compiler to patch.
+ dataLIR = cUnit->codeLiteralList;
+ if (dataLIR != NULL) {
while (dataLIR != NULL) {
- pushWord(cUnit->codeBuffer, dataLIR->operands[0]);
- dataLIR = NEXT_LIR(dataLIR);
+ uint32_t target = dataLIR->operands[0];
+ cUnit->compiler->AddCodePatch(cUnit->dex_cache, cUnit->dex_file,
+ cUnit->method_idx,
+ cUnit->access_flags,
+ target,
+ IsDirect(dataLIR->operands[1]),
+ cUnit->codeBuffer.size());
+ const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
+ // unique based on target to ensure code deduplication works
+ uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+ pushWord(cUnit->codeBuffer, unique_patch_value);
+ dataLIR = NEXT_LIR(dataLIR);
}
- // Push code and method literals, record offsets for the compiler to patch.
- dataLIR = cUnit->codeLiteralList;
- if (dataLIR != NULL) {
- while (dataLIR != NULL) {
- uint32_t target = dataLIR->operands[0];
- cUnit->compiler->AddCodePatch(cUnit->dex_cache, cUnit->dex_file,
- cUnit->method_idx,
- cUnit->access_flags,
- target,
- IsDirect(dataLIR->operands[1]),
- cUnit->codeBuffer.size());
- const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- pushWord(cUnit->codeBuffer, unique_patch_value);
- dataLIR = NEXT_LIR(dataLIR);
- }
- dataLIR = cUnit->methodLiteralList;
- while (dataLIR != NULL) {
- uint32_t target = dataLIR->operands[0];
- cUnit->compiler->AddMethodPatch(cUnit->dex_cache, cUnit->dex_file,
- cUnit->method_idx,
- cUnit->access_flags,
- target,
- IsDirect(dataLIR->operands[1]),
- cUnit->codeBuffer.size());
- const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- pushWord(cUnit->codeBuffer, unique_patch_value);
- dataLIR = NEXT_LIR(dataLIR);
- }
+ dataLIR = cUnit->methodLiteralList;
+ while (dataLIR != NULL) {
+ uint32_t target = dataLIR->operands[0];
+ cUnit->compiler->AddMethodPatch(cUnit->dex_cache, cUnit->dex_file,
+ cUnit->method_idx,
+ cUnit->access_flags,
+ target,
+ IsDirect(dataLIR->operands[1]),
+ cUnit->codeBuffer.size());
+ const DexFile::MethodId& id = cUnit->dex_file->GetMethodId(target);
+ // unique based on target to ensure code deduplication works
+ uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+ pushWord(cUnit->codeBuffer, unique_patch_value);
+ dataLIR = NEXT_LIR(dataLIR);
}
+ }
}
/* Write the switch tables to the output stream */
void installSwitchTables(CompilationUnit* cUnit)
{
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
- while (true) {
- SwitchTable* tabRec = (SwitchTable *) oatGrowableListIteratorNext(
- &iterator);
- if (tabRec == NULL) break;
- alignBuffer(cUnit->codeBuffer, tabRec->offset);
- /*
- * For Arm, our reference point is the address of the bx
- * instruction that does the launch, so we have to subtract
- * the auto pc-advance. For other targets the reference point
- * is a label, so we can use the offset as-is.
- */
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ while (true) {
+ SwitchTable* tabRec = (SwitchTable *) oatGrowableListIteratorNext(
+ &iterator);
+ if (tabRec == NULL) break;
+ alignBuffer(cUnit->codeBuffer, tabRec->offset);
+ /*
+ * For Arm, our reference point is the address of the bx
+ * instruction that does the launch, so we have to subtract
+ * the auto pc-advance. For other targets the reference point
+ * is a label, so we can use the offset as-is.
+ */
#if defined(TARGET_ARM)
- int bxOffset = tabRec->anchor->offset + 4;
+ int bxOffset = tabRec->anchor->offset + 4;
#elif defined(TARGET_X86)
- int bxOffset = 0;
+ int bxOffset = 0;
#else
- int bxOffset = tabRec->anchor->offset;
+ int bxOffset = tabRec->anchor->offset;
#endif
- if (cUnit->printMe) {
- LOG(INFO) << "Switch table for offset 0x" << std::hex << bxOffset;
- }
- if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- int* keys = (int*)&(tabRec->table[2]);
- for (int elems = 0; elems < tabRec->table[1]; elems++) {
- int disp = tabRec->targets[elems]->offset - bxOffset;
- if (cUnit->printMe) {
- LOG(INFO) << " Case[" << elems << "] key: 0x" <<
- std::hex << keys[elems] << ", disp: 0x" <<
- std::hex << disp;
- }
- pushWord(cUnit->codeBuffer, keys[elems]);
- pushWord(cUnit->codeBuffer,
- tabRec->targets[elems]->offset - bxOffset);
- }
- } else {
- DCHECK_EQ(static_cast<int>(tabRec->table[0]), static_cast<int>(Instruction::kPackedSwitchSignature));
- for (int elems = 0; elems < tabRec->table[1]; elems++) {
- int disp = tabRec->targets[elems]->offset - bxOffset;
- if (cUnit->printMe) {
- LOG(INFO) << " Case[" << elems << "] disp: 0x" <<
- std::hex << disp;
- }
- pushWord(cUnit->codeBuffer,
- tabRec->targets[elems]->offset - bxOffset);
- }
- }
+ if (cUnit->printMe) {
+ LOG(INFO) << "Switch table for offset 0x" << std::hex << bxOffset;
}
+ if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
+ int* keys = (int*)&(tabRec->table[2]);
+ for (int elems = 0; elems < tabRec->table[1]; elems++) {
+ int disp = tabRec->targets[elems]->offset - bxOffset;
+ if (cUnit->printMe) {
+ LOG(INFO) << " Case[" << elems << "] key: 0x"
+ << std::hex << keys[elems] << ", disp: 0x"
+ << std::hex << disp;
+ }
+ pushWord(cUnit->codeBuffer, keys[elems]);
+ pushWord(cUnit->codeBuffer,
+ tabRec->targets[elems]->offset - bxOffset);
+ }
+ } else {
+ DCHECK_EQ(static_cast<int>(tabRec->table[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ for (int elems = 0; elems < tabRec->table[1]; elems++) {
+ int disp = tabRec->targets[elems]->offset - bxOffset;
+ if (cUnit->printMe) {
+ LOG(INFO) << " Case[" << elems << "] disp: 0x"
+ << std::hex << disp;
+ }
+ pushWord(cUnit->codeBuffer, tabRec->targets[elems]->offset - bxOffset);
+ }
+ }
+ }
}
/* Write the fill array dta to the output stream */
void installFillArrayData(CompilationUnit* cUnit)
{
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
- while (true) {
- FillArrayData *tabRec = (FillArrayData *) oatGrowableListIteratorNext(
- &iterator);
- if (tabRec == NULL) break;
- alignBuffer(cUnit->codeBuffer, tabRec->offset);
- for (int i = 0; i < (tabRec->size + 1) / 2; i++) {
- cUnit->codeBuffer.push_back( tabRec->table[i] & 0xFF);
- cUnit->codeBuffer.push_back( (tabRec->table[i] >> 8) & 0xFF);
- }
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
+ while (true) {
+ FillArrayData *tabRec = (FillArrayData *) oatGrowableListIteratorNext(
+ &iterator);
+ if (tabRec == NULL) break;
+ alignBuffer(cUnit->codeBuffer, tabRec->offset);
+ for (int i = 0; i < (tabRec->size + 1) / 2; i++) {
+ cUnit->codeBuffer.push_back( tabRec->table[i] & 0xFF);
+ cUnit->codeBuffer.push_back( (tabRec->table[i] >> 8) & 0xFF);
}
+ }
}
int assignLiteralOffsetCommon(LIR* lir, int offset)
{
- for (;lir != NULL; lir = lir->next) {
- lir->offset = offset;
- offset += 4;
- }
- return offset;
+ for (;lir != NULL; lir = lir->next) {
+ lir->offset = offset;
+ offset += 4;
+ }
+ return offset;
}
void createMappingTable(CompilationUnit* cUnit)
{
- LIR* tgtLIR;
- int currentDalvikOffset = -1;
+ LIR* tgtLIR;
+ int currentDalvikOffset = -1;
- for (tgtLIR = (LIR *) cUnit->firstLIRInsn;
- tgtLIR;
- tgtLIR = NEXT_LIR(tgtLIR)) {
- if ((tgtLIR->opcode >= 0) && !tgtLIR->flags.isNop &&
- (currentDalvikOffset != tgtLIR->dalvikOffset)) {
- // Changed - need to emit a record
- cUnit->mappingTable.push_back(tgtLIR->offset);
- cUnit->mappingTable.push_back(tgtLIR->dalvikOffset);
- currentDalvikOffset = tgtLIR->dalvikOffset;
- }
+ for (tgtLIR = (LIR *) cUnit->firstLIRInsn;
+ tgtLIR;
+ tgtLIR = NEXT_LIR(tgtLIR)) {
+ if ((tgtLIR->opcode >= 0) && !tgtLIR->flags.isNop &&
+ (currentDalvikOffset != tgtLIR->dalvikOffset)) {
+ // Changed - need to emit a record
+ cUnit->mappingTable.push_back(tgtLIR->offset);
+ cUnit->mappingTable.push_back(tgtLIR->dalvikOffset);
+ currentDalvikOffset = tgtLIR->dalvikOffset;
}
+ }
}
/* Determine the offset of each literal field */
int assignLiteralOffset(CompilationUnit* cUnit, int offset)
{
- offset = assignLiteralOffsetCommon(cUnit->literalList, offset);
- offset = assignLiteralOffsetCommon(cUnit->codeLiteralList, offset);
- offset = assignLiteralOffsetCommon(cUnit->methodLiteralList, offset);
- return offset;
+ offset = assignLiteralOffsetCommon(cUnit->literalList, offset);
+ offset = assignLiteralOffsetCommon(cUnit->codeLiteralList, offset);
+ offset = assignLiteralOffsetCommon(cUnit->methodLiteralList, offset);
+ return offset;
}
int assignSwitchTablesOffset(CompilationUnit* cUnit, int offset)
{
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
- while (true) {
- SwitchTable *tabRec = (SwitchTable *) oatGrowableListIteratorNext(
- &iterator);
- if (tabRec == NULL) break;
- tabRec->offset = offset;
- if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- offset += tabRec->table[1] * (sizeof(int) * 2);
- } else {
- DCHECK_EQ(static_cast<int>(tabRec->table[0]), static_cast<int>(Instruction::kPackedSwitchSignature));
- offset += tabRec->table[1] * sizeof(int);
- }
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ while (true) {
+ SwitchTable *tabRec = (SwitchTable *) oatGrowableListIteratorNext(
+ &iterator);
+ if (tabRec == NULL) break;
+ tabRec->offset = offset;
+ if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
+ offset += tabRec->table[1] * (sizeof(int) * 2);
+ } else {
+ DCHECK_EQ(static_cast<int>(tabRec->table[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ offset += tabRec->table[1] * sizeof(int);
}
- return offset;
+ }
+ return offset;
}
int assignFillArrayDataOffset(CompilationUnit* cUnit, int offset)
{
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
- while (true) {
- FillArrayData *tabRec = (FillArrayData *) oatGrowableListIteratorNext(
- &iterator);
- if (tabRec == NULL) break;
- tabRec->offset = offset;
- offset += tabRec->size;
- // word align
- offset = (offset + 3) & ~3;
- }
- return offset;
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->fillArrayData, &iterator);
+ while (true) {
+ FillArrayData *tabRec = (FillArrayData *) oatGrowableListIteratorNext(
+ &iterator);
+ if (tabRec == NULL) break;
+ tabRec->offset = offset;
+ offset += tabRec->size;
+ // word align
+ offset = (offset + 3) & ~3;
+ }
+ return offset;
}
/*
@@ -787,21 +793,21 @@
*/
void oatAssignOffsets(CompilationUnit* cUnit)
{
- int offset = oatAssignInsnOffsets(cUnit);
+ int offset = oatAssignInsnOffsets(cUnit);
- /* Const values have to be word aligned */
- offset = (offset + 3) & ~3;
+ /* Const values have to be word aligned */
+ offset = (offset + 3) & ~3;
- /* Set up offsets for literals */
- cUnit->dataOffset = offset;
+ /* Set up offsets for literals */
+ cUnit->dataOffset = offset;
- offset = assignLiteralOffset(cUnit, offset);
+ offset = assignLiteralOffset(cUnit, offset);
- offset = assignSwitchTablesOffset(cUnit, offset);
+ offset = assignSwitchTablesOffset(cUnit, offset);
- offset = assignFillArrayDataOffset(cUnit, offset);
+ offset = assignFillArrayDataOffset(cUnit, offset);
- cUnit->totalSize = offset;
+ cUnit->totalSize = offset;
}
/*
@@ -811,41 +817,41 @@
*/
void oatAssembleLIR(CompilationUnit* cUnit)
{
- oatAssignOffsets(cUnit);
- /*
- * Assemble here. Note that we generate code with optimistic assumptions
- * and if found now to work, we'll have to redo the sequence and retry.
- */
+ oatAssignOffsets(cUnit);
+ /*
+ * Assemble here. Note that we generate code with optimistic assumptions
+ * and if found now to work, we'll have to redo the sequence and retry.
+ */
- while (true) {
- AssemblerStatus res = oatAssembleInstructions(cUnit, 0);
- if (res == kSuccess) {
- break;
- } else {
- cUnit->assemblerRetries++;
- if (cUnit->assemblerRetries > MAX_ASSEMBLER_RETRIES) {
- oatCodegenDump(cUnit);
- LOG(FATAL) << "Assembler error - too many retries";
- }
- // Redo offsets and try again
- oatAssignOffsets(cUnit);
- cUnit->codeBuffer.clear();
- }
+ while (true) {
+ AssemblerStatus res = oatAssembleInstructions(cUnit, 0);
+ if (res == kSuccess) {
+ break;
+ } else {
+ cUnit->assemblerRetries++;
+ if (cUnit->assemblerRetries > MAX_ASSEMBLER_RETRIES) {
+ oatCodegenDump(cUnit);
+ LOG(FATAL) << "Assembler error - too many retries";
+ }
+ // Redo offsets and try again
+ oatAssignOffsets(cUnit);
+ cUnit->codeBuffer.clear();
}
+ }
- // Install literals
- installLiteralPools(cUnit);
+ // Install literals
+ installLiteralPools(cUnit);
- // Install switch tables
- installSwitchTables(cUnit);
+ // Install switch tables
+ installSwitchTables(cUnit);
- // Install fill array data
- installFillArrayData(cUnit);
+ // Install fill array data
+ installFillArrayData(cUnit);
- /*
- * Create the mapping table
- */
- createMappingTable(cUnit);
+ /*
+ * Create the mapping table
+ */
+ createMappingTable(cUnit);
}
/*
@@ -857,109 +863,108 @@
*/
LIR* insertCaseLabel(CompilationUnit* cUnit, int vaddr, int keyVal)
{
- SafeMap<unsigned int, LIR*>::iterator it;
- it = cUnit->boundaryMap.find(vaddr);
- if (it == cUnit->boundaryMap.end()) {
- LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
- }
- LIR* newLabel = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
- newLabel->dalvikOffset = vaddr;
- newLabel->opcode = kPseudoCaseLabel;
- newLabel->operands[0] = keyVal;
- oatInsertLIRAfter(it->second, (LIR*)newLabel);
- return newLabel;
+ SafeMap<unsigned int, LIR*>::iterator it;
+ it = cUnit->boundaryMap.find(vaddr);
+ if (it == cUnit->boundaryMap.end()) {
+ LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
+ }
+ LIR* newLabel = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
+ newLabel->dalvikOffset = vaddr;
+ newLabel->opcode = kPseudoCaseLabel;
+ newLabel->operands[0] = keyVal;
+ oatInsertLIRAfter(it->second, (LIR*)newLabel);
+ return newLabel;
}
void markPackedCaseLabels(CompilationUnit* cUnit, SwitchTable *tabRec)
{
- const u2* table = tabRec->table;
- int baseVaddr = tabRec->vaddr;
- int *targets = (int*)&table[4];
- int entries = table[1];
- int lowKey = s4FromSwitchData(&table[2]);
- for (int i = 0; i < entries; i++) {
- tabRec->targets[i] = insertCaseLabel(cUnit, baseVaddr + targets[i],
- i + lowKey);
- }
+ const u2* table = tabRec->table;
+ int baseVaddr = tabRec->vaddr;
+ int *targets = (int*)&table[4];
+ int entries = table[1];
+ int lowKey = s4FromSwitchData(&table[2]);
+ for (int i = 0; i < entries; i++) {
+ tabRec->targets[i] = insertCaseLabel(cUnit, baseVaddr + targets[i],
+ i + lowKey);
+ }
}
void markSparseCaseLabels(CompilationUnit* cUnit, SwitchTable *tabRec)
{
- const u2* table = tabRec->table;
- int baseVaddr = tabRec->vaddr;
- int entries = table[1];
- int* keys = (int*)&table[2];
- int* targets = &keys[entries];
- for (int i = 0; i < entries; i++) {
- tabRec->targets[i] = insertCaseLabel(cUnit, baseVaddr + targets[i],
- keys[i]);
- }
+ const u2* table = tabRec->table;
+ int baseVaddr = tabRec->vaddr;
+ int entries = table[1];
+ int* keys = (int*)&table[2];
+ int* targets = &keys[entries];
+ for (int i = 0; i < entries; i++) {
+ tabRec->targets[i] = insertCaseLabel(cUnit, baseVaddr + targets[i],
+ keys[i]);
+ }
}
void oatProcessSwitchTables(CompilationUnit* cUnit)
{
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
- while (true) {
- SwitchTable *tabRec = (SwitchTable *) oatGrowableListIteratorNext(
- &iterator);
- if (tabRec == NULL) break;
- if (tabRec->table[0] == Instruction::kPackedSwitchSignature) {
- markPackedCaseLabels(cUnit, tabRec);
- } else if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
- markSparseCaseLabels(cUnit, tabRec);
- } else {
- LOG(FATAL) << "Invalid switch table";
- }
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->switchTables, &iterator);
+ while (true) {
+ SwitchTable *tabRec =
+ (SwitchTable *) oatGrowableListIteratorNext(&iterator);
+ if (tabRec == NULL) break;
+ if (tabRec->table[0] == Instruction::kPackedSwitchSignature) {
+ markPackedCaseLabels(cUnit, tabRec);
+ } else if (tabRec->table[0] == Instruction::kSparseSwitchSignature) {
+ markSparseCaseLabels(cUnit, tabRec);
+ } else {
+ LOG(FATAL) << "Invalid switch table";
}
+ }
}
//FIXME: Do we have endian issues here?
void dumpSparseSwitchTable(const u2* table)
- /*
- * Sparse switch data format:
- * ushort ident = 0x0200 magic value
- * ushort size number of entries in the table; > 0
- * int keys[size] keys, sorted low-to-high; 32-bit aligned
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (2+size*4) 16-bit code units.
- */
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
{
- u2 ident = table[0];
- int entries = table[1];
- int* keys = (int*)&table[2];
- int* targets = &keys[entries];
- LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident <<
- ", entries: " << std::dec << entries;
- for (int i = 0; i < entries; i++) {
- LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex <<
- targets[i];
- }
+ u2 ident = table[0];
+ int entries = table[1];
+ int* keys = (int*)&table[2];
+ int* targets = &keys[entries];
+ LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
+ << ", entries: " << std::dec << entries;
+ for (int i = 0; i < entries; i++) {
+ LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
+ }
}
void dumpPackedSwitchTable(const u2* table)
- /*
- * Packed switch data format:
- * ushort ident = 0x0100 magic value
- * ushort size number of entries in the table
- * int first_key first (and lowest) switch case value
- * int targets[size] branch targets, relative to switch opcode
- *
- * Total size is (4+size*2) 16-bit code units.
- */
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
{
- u2 ident = table[0];
- int* targets = (int*)&table[4];
- int entries = table[1];
- int lowKey = s4FromSwitchData(&table[2]);
- LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident <<
- ", entries: " << std::dec << entries << ", lowKey: " << lowKey;
- for (int i = 0; i < entries; i++) {
- LOG(INFO) << " Key[" << (i + lowKey) << "] -> 0x" << std::hex <<
- targets[i];
- }
+ u2 ident = table[0];
+ int* targets = (int*)&table[4];
+ int entries = table[1];
+ int lowKey = s4FromSwitchData(&table[2]);
+ LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
+ << ", entries: " << std::dec << entries << ", lowKey: " << lowKey;
+ for (int i = 0; i < entries; i++) {
+ LOG(INFO) << " Key[" << (i + lowKey) << "] -> 0x" << std::hex
+ << targets[i];
+ }
}
diff --git a/src/compiler/codegen/CompilerCodegen.h b/src/compiler/codegen/CompilerCodegen.h
index 20b2e45..9381735 100644
--- a/src/compiler/codegen/CompilerCodegen.h
+++ b/src/compiler/codegen/CompilerCodegen.h
@@ -22,7 +22,8 @@
namespace art {
LIR* rawLIR(CompilationUnit* cUnit, int dalvikOffset, int opcode, int op0 = 0,
- int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0,
+ LIR* target = NULL);
int oatGetInsnSize(LIR* lir);
diff --git a/src/compiler/codegen/GenCommon.cc b/src/compiler/codegen/GenCommon.cc
index e2b8282..9e21aea 100644
--- a/src/compiler/codegen/GenCommon.cc
+++ b/src/compiler/codegen/GenCommon.cc
@@ -24,7 +24,7 @@
* and "op" calls may be used here.
*/
void genInvoke(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- InvokeType type, bool isRange);
+ InvokeType type, bool isRange);
#if defined(TARGET_ARM)
LIR* opIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide);
bool smallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
@@ -33,148 +33,148 @@
void callRuntimeHelperImm(CompilationUnit* cUnit, int helperOffset, int arg0) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- opRegCopy(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ opRegCopy(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperRegLocation(CompilationUnit* cUnit, int helperOffset,
RegLocation arg0) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- if (arg0.wide == 0) {
- loadValueDirectFixed(cUnit, arg0, rARG0);
- } else {
- loadValueDirectWideFixed(cUnit, arg0, rARG0, rARG1);
- }
- oatClobberCalleeSave(cUnit);
+ if (arg0.wide == 0) {
+ loadValueDirectFixed(cUnit, arg0, rARG0);
+ } else {
+ loadValueDirectWideFixed(cUnit, arg0, rARG0, rARG1);
+ }
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperImmImm(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadConstant(cUnit, rARG0, arg0);
- loadConstant(cUnit, rARG1, arg1);
- oatClobberCalleeSave(cUnit);
+ loadConstant(cUnit, rARG0, arg0);
+ loadConstant(cUnit, rARG1, arg1);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperImmRegLocation(CompilationUnit* cUnit, int helperOffset,
int arg0, RegLocation arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- if (arg1.wide == 0) {
- loadValueDirectFixed(cUnit, arg1, rARG1);
- } else {
- loadValueDirectWideFixed(cUnit, arg1, rARG1, rARG2);
- }
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ if (arg1.wide == 0) {
+ loadValueDirectFixed(cUnit, arg1, rARG1);
+ } else {
+ loadValueDirectWideFixed(cUnit, arg1, rARG1, rARG2);
+ }
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperRegLocationImm(CompilationUnit* cUnit, int helperOffset,
RegLocation arg0, int arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadValueDirectFixed(cUnit, arg0, rARG0);
- loadConstant(cUnit, rARG1, arg1);
- oatClobberCalleeSave(cUnit);
+ loadValueDirectFixed(cUnit, arg0, rARG0);
+ loadConstant(cUnit, rARG1, arg1);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperImmReg(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- opRegCopy(cUnit, rARG1, arg1);
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ opRegCopy(cUnit, rARG1, arg1);
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperRegImm(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- opRegCopy(cUnit, rARG0, arg0);
- loadConstant(cUnit, rARG1, arg1);
- oatClobberCalleeSave(cUnit);
+ opRegCopy(cUnit, rARG0, arg0);
+ loadConstant(cUnit, rARG1, arg1);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperImmMethod(CompilationUnit* cUnit, int helperOffset,
int arg0) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadCurrMethodDirect(cUnit, rARG1);
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ loadCurrMethodDirect(cUnit, rARG1);
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
@@ -183,98 +183,99 @@
RegLocation arg0,
RegLocation arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- if (arg0.wide == 0) {
- loadValueDirectFixed(cUnit, arg0, rARG0);
- if (arg1.wide == 0) {
- loadValueDirectFixed(cUnit, arg1, rARG1);
- } else {
- loadValueDirectWideFixed(cUnit, arg1, rARG1, rARG2);
- }
+ if (arg0.wide == 0) {
+ loadValueDirectFixed(cUnit, arg0, rARG0);
+ if (arg1.wide == 0) {
+ loadValueDirectFixed(cUnit, arg1, rARG1);
} else {
- loadValueDirectWideFixed(cUnit, arg0, rARG0, rARG1);
- if (arg1.wide == 0) {
- loadValueDirectFixed(cUnit, arg1, rARG2);
- } else {
- loadValueDirectWideFixed(cUnit, arg1, rARG2, rARG3);
- }
+ loadValueDirectWideFixed(cUnit, arg1, rARG1, rARG2);
}
- oatClobberCalleeSave(cUnit);
+ } else {
+ loadValueDirectWideFixed(cUnit, arg0, rARG0, rARG1);
+ if (arg1.wide == 0) {
+ loadValueDirectFixed(cUnit, arg1, rARG2);
+ } else {
+ loadValueDirectWideFixed(cUnit, arg1, rARG2, rARG3);
+ }
+ }
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg1) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- DCHECK_NE((int)rARG0, arg1); // check copy into arg0 won't clobber arg1
- opRegCopy(cUnit, rARG0, arg0);
- opRegCopy(cUnit, rARG1, arg1);
- oatClobberCalleeSave(cUnit);
+ DCHECK_NE((int)rARG0, arg1); // check copy into arg0 won't clobber arg1
+ opRegCopy(cUnit, rARG0, arg0);
+ opRegCopy(cUnit, rARG1, arg1);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperRegRegImm(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg1, int arg2) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- DCHECK_NE((int)rARG0, arg1); // check copy into arg0 won't clobber arg1
- opRegCopy(cUnit, rARG0, arg0);
- opRegCopy(cUnit, rARG1, arg1);
- loadConstant(cUnit, rARG2, arg2);
- oatClobberCalleeSave(cUnit);
+ DCHECK_NE((int)rARG0, arg1); // check copy into arg0 won't clobber arg1
+ opRegCopy(cUnit, rARG0, arg0);
+ opRegCopy(cUnit, rARG1, arg1);
+ loadConstant(cUnit, rARG2, arg2);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
-void callRuntimeHelperImmMethodRegLocation(CompilationUnit* cUnit, int helperOffset,
- int arg0, RegLocation arg2) {
+void callRuntimeHelperImmMethodRegLocation(CompilationUnit* cUnit,
+ int helperOffset,
+ int arg0, RegLocation arg2) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadValueDirectFixed(cUnit, arg2, rARG2);
- loadCurrMethodDirect(cUnit, rARG1);
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ loadValueDirectFixed(cUnit, arg2, rARG2);
+ loadCurrMethodDirect(cUnit, rARG1);
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
void callRuntimeHelperImmMethodImm(CompilationUnit* cUnit, int helperOffset,
int arg0, int arg2) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadCurrMethodDirect(cUnit, rARG1);
- loadConstant(cUnit, rARG2, arg2);
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ loadCurrMethodDirect(cUnit, rARG1);
+ loadConstant(cUnit, rARG2, arg2);
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
@@ -283,21 +284,21 @@
int arg0, RegLocation arg1,
RegLocation arg2) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, helperOffset);
+ int rTgt = loadHelper(cUnit, helperOffset);
#endif
- loadValueDirectFixed(cUnit, arg1, rARG1);
- if (arg2.wide == 0) {
- loadValueDirectFixed(cUnit, arg2, rARG2);
- } else {
- loadValueDirectWideFixed(cUnit, arg2, rARG2, rARG3);
- }
- loadConstant(cUnit, rARG0, arg0);
- oatClobberCalleeSave(cUnit);
+ loadValueDirectFixed(cUnit, arg1, rARG1);
+ if (arg2.wide == 0) {
+ loadValueDirectFixed(cUnit, arg2, rARG2);
+ } else {
+ loadValueDirectWideFixed(cUnit, arg2, rARG2, rARG3);
+ }
+ loadConstant(cUnit, rARG0, arg0);
+ oatClobberCalleeSave(cUnit);
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#else
- opThreadMem(cUnit, kOpBlx, helperOffset);
+ opThreadMem(cUnit, kOpBlx, helperOffset);
#endif
}
@@ -307,18 +308,18 @@
*/
void genBarrier(CompilationUnit* cUnit)
{
- LIR* barrier = newLIR0(cUnit, kPseudoBarrier);
- /* Mark all resources as being clobbered */
- barrier->defMask = -1;
+ LIR* barrier = newLIR0(cUnit, kPseudoBarrier);
+ /* Mark all resources as being clobbered */
+ barrier->defMask = -1;
}
/* Generate unconditional branch instructions */
LIR* opUnconditionalBranch(CompilationUnit* cUnit, LIR* target)
{
- LIR* branch = opBranchUnconditional(cUnit, kOpUncondBr);
- branch->target = (LIR*) target;
- return branch;
+ LIR* branch = opBranchUnconditional(cUnit, kOpUncondBr);
+ branch->target = (LIR*) target;
+ return branch;
}
// FIXME: need to do some work to split out targets with
@@ -327,170 +328,169 @@
LIR* genCheck(CompilationUnit* cUnit, ConditionCode cCode, MIR* mir,
ThrowKind kind)
{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- mir ? mir->offset : 0);
- LIR* branch = opCondBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
+ mir ? mir->offset : 0);
+ LIR* branch = opCondBranch(cUnit, cCode, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
}
#endif
LIR* genImmedCheck(CompilationUnit* cUnit, ConditionCode cCode,
int reg, int immVal, MIR* mir, ThrowKind kind)
{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind, mir->offset);
- LIR* branch;
- if (cCode == kCondAl) {
- branch = opUnconditionalBranch(cUnit, tgt);
- } else {
- branch = opCmpImmBranch(cUnit, cCode, reg, immVal, tgt);
- }
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind, mir->offset);
+ LIR* branch;
+ if (cCode == kCondAl) {
+ branch = opUnconditionalBranch(cUnit, tgt);
+ } else {
+ branch = opCmpImmBranch(cUnit, cCode, reg, immVal, tgt);
+ }
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
}
/* Perform null-check on a register. */
LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, MIR* mir)
{
- if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
- mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
- return NULL;
- }
- return genImmedCheck(cUnit, kCondEq, mReg, 0, mir, kThrowNullPointer);
+ if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
+ mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
+ return NULL;
+ }
+ return genImmedCheck(cUnit, kCondEq, mReg, 0, mir, kThrowNullPointer);
}
/* Perform check on two registers */
LIR* genRegRegCheck(CompilationUnit* cUnit, ConditionCode cCode,
- int reg1, int reg2, MIR* mir, ThrowKind kind)
+ int reg1, int reg2, MIR* mir, ThrowKind kind)
{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- mir ? mir->offset : 0, reg1, reg2);
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
+ mir ? mir->offset : 0, reg1, reg2);
#if defined(TARGET_MIPS)
- LIR* branch = opCmpBranch(cUnit, cCode, reg1, reg2, tgt);
+ LIR* branch = opCmpBranch(cUnit, cCode, reg1, reg2, tgt);
#else
- opRegReg(cUnit, kOpCmp, reg1, reg2);
- LIR* branch = opCondBranch(cUnit, cCode, tgt);
+ opRegReg(cUnit, kOpCmp, reg1, reg2);
+ LIR* branch = opCondBranch(cUnit, cCode, tgt);
#endif
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
}
void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
RegLocation rlSrc1, RegLocation rlSrc2, LIR* labelList)
{
- ConditionCode cond;
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::IF_EQ:
- cond = kCondEq;
- break;
- case Instruction::IF_NE:
- cond = kCondNe;
- break;
- case Instruction::IF_LT:
- cond = kCondLt;
- break;
- case Instruction::IF_GE:
- cond = kCondGe;
- break;
- case Instruction::IF_GT:
- cond = kCondGt;
- break;
- case Instruction::IF_LE:
- cond = kCondLe;
- break;
- default:
- cond = (ConditionCode)0;
- LOG(FATAL) << "Unexpected opcode " << (int)opcode;
- }
+ ConditionCode cond;
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::IF_EQ:
+ cond = kCondEq;
+ break;
+ case Instruction::IF_NE:
+ cond = kCondNe;
+ break;
+ case Instruction::IF_LT:
+ cond = kCondLt;
+ break;
+ case Instruction::IF_GE:
+ cond = kCondGe;
+ break;
+ case Instruction::IF_GT:
+ cond = kCondGt;
+ break;
+ case Instruction::IF_LE:
+ cond = kCondLe;
+ break;
+ default:
+ cond = (ConditionCode)0;
+ LOG(FATAL) << "Unexpected opcode " << (int)opcode;
+ }
#if defined(TARGET_MIPS)
- opCmpBranch(cUnit, cond, rlSrc1.lowReg, rlSrc2.lowReg,
- &labelList[bb->taken->id]);
+ opCmpBranch(cUnit, cond, rlSrc1.lowReg, rlSrc2.lowReg,
+ &labelList[bb->taken->id]);
#else
- opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- opCondBranch(cUnit, cond, &labelList[bb->taken->id]);
+ opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
+ opCondBranch(cUnit, cond, &labelList[bb->taken->id]);
#endif
- opUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+ opUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
}
void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
RegLocation rlSrc, LIR* labelList)
{
- ConditionCode cond;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::IF_EQZ:
- cond = kCondEq;
- break;
- case Instruction::IF_NEZ:
- cond = kCondNe;
- break;
- case Instruction::IF_LTZ:
- cond = kCondLt;
- break;
- case Instruction::IF_GEZ:
- cond = kCondGe;
- break;
- case Instruction::IF_GTZ:
- cond = kCondGt;
- break;
- case Instruction::IF_LEZ:
- cond = kCondLe;
- break;
- default:
- cond = (ConditionCode)0;
- LOG(FATAL) << "Unexpected opcode " << (int)opcode;
- }
+ ConditionCode cond;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::IF_EQZ:
+ cond = kCondEq;
+ break;
+ case Instruction::IF_NEZ:
+ cond = kCondNe;
+ break;
+ case Instruction::IF_LTZ:
+ cond = kCondLt;
+ break;
+ case Instruction::IF_GEZ:
+ cond = kCondGe;
+ break;
+ case Instruction::IF_GTZ:
+ cond = kCondGt;
+ break;
+ case Instruction::IF_LEZ:
+ cond = kCondLe;
+ break;
+ default:
+ cond = (ConditionCode)0;
+ LOG(FATAL) << "Unexpected opcode " << (int)opcode;
+ }
#if defined(TARGET_MIPS) || defined(TARGET_X86)
- opCmpImmBranch(cUnit, cond, rlSrc.lowReg, 0, &labelList[bb->taken->id]);
+ opCmpImmBranch(cUnit, cond, rlSrc.lowReg, 0, &labelList[bb->taken->id]);
#else
- opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
- opCondBranch(cUnit, cond, &labelList[bb->taken->id]);
+ opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
+ opCondBranch(cUnit, cond, &labelList[bb->taken->id]);
#endif
- opUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+ opUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
}
void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- if (rlSrc.location == kLocPhysReg) {
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- } else {
- loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
- }
- opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
- rlResult.lowReg, 31);
- storeValueWide(cUnit, rlDest, rlResult);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ if (rlSrc.location == kLocPhysReg) {
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ } else {
+ loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
+ }
+ opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
+ storeValueWide(cUnit, rlDest, rlResult);
}
void genIntNarrowing(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- OpKind op = kOpInvalid;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::INT_TO_BYTE:
- op = kOp2Byte;
- break;
- case Instruction::INT_TO_SHORT:
- op = kOp2Short;
- break;
- case Instruction::INT_TO_CHAR:
- op = kOp2Char;
- break;
- default:
- LOG(ERROR) << "Bad int conversion type";
- }
- opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
- storeValue(cUnit, rlDest, rlResult);
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ OpKind op = kOpInvalid;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::INT_TO_BYTE:
+ op = kOp2Byte;
+ break;
+ case Instruction::INT_TO_SHORT:
+ op = kOp2Short;
+ break;
+ case Instruction::INT_TO_CHAR:
+ op = kOp2Char;
+ break;
+ default:
+ LOG(ERROR) << "Bad int conversion type";
+ }
+ opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
}
/*
@@ -501,20 +501,20 @@
void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit); /* Everything to home location */
- uint32_t type_idx = mir->dalvikInsn.vC;
- int funcOffset;
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- type_idx)) {
- funcOffset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
- } else {
- funcOffset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
- }
- callRuntimeHelperImmMethodRegLocation(cUnit, funcOffset, type_idx, rlSrc);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ uint32_t type_idx = mir->dalvikInsn.vC;
+ int funcOffset;
+ if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
+ cUnit->dex_cache,
+ *cUnit->dex_file,
+ type_idx)) {
+ funcOffset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ } else {
+ funcOffset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ }
+ callRuntimeHelperImmMethodRegLocation(cUnit, funcOffset, type_idx, rlSrc);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
}
/*
@@ -525,302 +525,299 @@
*/
void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
- DecodedInstruction* dInsn = &mir->dalvikInsn;
- int elems = dInsn->vA;
- int typeIdx = dInsn->vB;
- oatFlushAllRegs(cUnit); /* Everything to home location */
- int funcOffset;
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- typeIdx)) {
- funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
- } else {
- funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
- }
- callRuntimeHelperImmMethodImm(cUnit, funcOffset, typeIdx, elems);
- oatFreeTemp(cUnit, rARG2);
- oatFreeTemp(cUnit, rARG1);
+ DecodedInstruction* dInsn = &mir->dalvikInsn;
+ int elems = dInsn->vA;
+ int typeIdx = dInsn->vB;
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ int funcOffset;
+ if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
+ cUnit->dex_cache,
+ *cUnit->dex_file,
+ typeIdx)) {
+ funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ } else {
+ funcOffset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ }
+ callRuntimeHelperImmMethodImm(cUnit, funcOffset, typeIdx, elems);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG1);
+ /*
+ * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
+ * return region. Because AllocFromCode placed the new array
+ * in rRET0, we'll just lock it into place. When debugger support is
+ * added, it may be necessary to additionally copy all return
+ * values to a home location in thread-local storage
+ */
+ oatLockTemp(cUnit, rRET0);
+
+ // TODO: use the correct component size, currently all supported types
+ // share array alignment with ints (see comment at head of function)
+ size_t component_size = sizeof(int32_t);
+
+ // Having a range of 0 is legal
+ if (isRange && (dInsn->vA > 0)) {
/*
- * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
- * return region. Because AllocFromCode placed the new array
- * in rRET0, we'll just lock it into place. When debugger support is
- * added, it may be necessary to additionally copy all return
- * values to a home location in thread-local storage
+ * Bit of ugliness here. We're going generate a mem copy loop
+ * on the register range, but it is possible that some regs
+ * in the range have been promoted. This is unlikely, but
+ * before generating the copy, we'll just force a flush
+ * of any regs in the source range that have been promoted to
+ * home location.
*/
- oatLockTemp(cUnit, rRET0);
-
- // TODO: use the correct component size, currently all supported types
- // share array alignment with ints (see comment at head of function)
- size_t component_size = sizeof(int32_t);
-
- // Having a range of 0 is legal
- if (isRange && (dInsn->vA > 0)) {
- /*
- * Bit of ugliness here. We're going generate a mem copy loop
- * on the register range, but it is possible that some regs
- * in the range have been promoted. This is unlikely, but
- * before generating the copy, we'll just force a flush
- * of any regs in the source range that have been promoted to
- * home location.
- */
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- RegLocation loc = oatUpdateLoc(cUnit,
- oatGetSrc(cUnit, mir, i));
- if (loc.location == kLocPhysReg) {
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
- }
- }
- /*
- * TUNING note: generated code here could be much improved, but
- * this is an uncommon operation and isn't especially performance
- * critical.
- */
- int rSrc = oatAllocTemp(cUnit);
- int rDst = oatAllocTemp(cUnit);
- int rIdx = oatAllocTemp(cUnit);
-#if defined(TARGET_ARM)
- int rVal = rLR; // Using a lot of temps, rLR is known free here
-#elif defined(TARGET_X86)
- int rVal = rSrc;
-#else
- int rVal = oatAllocTemp(cUnit);
-#endif
- // Set up source pointer
- RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
- opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
- oatSRegOffset(cUnit, rlFirst.sRegLow));
- // Set up the target pointer
- opRegRegImm(cUnit, kOpAdd, rDst, rRET0,
- Array::DataOffset(component_size).Int32Value());
- // Set up the loop counter (known to be > 0)
- loadConstant(cUnit, rIdx, dInsn->vA - 1);
- // Generate the copy loop. Going backwards for convenience
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- // Copy next element
- loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
- storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
-#if defined(TARGET_ARM)
- // Combine sub & test using sub setflags encoding here
- newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
- opCondBranch(cUnit, kCondGe, target);
-#else
- oatFreeTemp(cUnit, rVal);
- opRegImm(cUnit, kOpSub, rIdx, 1);
- opCmpImmBranch(cUnit, kCondGe, rIdx, 0, target);
-#endif
- } else if (!isRange) {
- // TUNING: interleave
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- RegLocation rlArg = loadValue(cUnit,
- oatGetSrc(cUnit, mir, i), kCoreReg);
- storeBaseDisp(cUnit, rRET0,
- Array::DataOffset(component_size).Int32Value() +
- i * 4, rlArg.lowReg, kWord);
- // If the loadValue caused a temp to be allocated, free it
- if (oatIsTemp(cUnit, rlArg.lowReg)) {
- oatFreeTemp(cUnit, rlArg.lowReg);
- }
- }
+ for (unsigned int i = 0; i < dInsn->vA; i++) {
+ RegLocation loc = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, i));
+ if (loc.location == kLocPhysReg) {
+ storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
+ loc.lowReg, kWord);
+ }
}
+ /*
+ * TUNING note: generated code here could be much improved, but
+ * this is an uncommon operation and isn't especially performance
+ * critical.
+ */
+ int rSrc = oatAllocTemp(cUnit);
+ int rDst = oatAllocTemp(cUnit);
+ int rIdx = oatAllocTemp(cUnit);
+#if defined(TARGET_ARM)
+ int rVal = rLR; // Using a lot of temps, rLR is known free here
+#elif defined(TARGET_X86)
+ int rVal = rSrc;
+#else
+ int rVal = oatAllocTemp(cUnit);
+#endif
+ // Set up source pointer
+ RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
+ opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
+ oatSRegOffset(cUnit, rlFirst.sRegLow));
+ // Set up the target pointer
+ opRegRegImm(cUnit, kOpAdd, rDst, rRET0,
+ Array::DataOffset(component_size).Int32Value());
+ // Set up the loop counter (known to be > 0)
+ loadConstant(cUnit, rIdx, dInsn->vA - 1);
+ // Generate the copy loop. Going backwards for convenience
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ // Copy next element
+ loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
+ storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
+#if defined(TARGET_ARM)
+ // Combine sub & test using sub setflags encoding here
+ newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
+ opCondBranch(cUnit, kCondGe, target);
+#else
+ oatFreeTemp(cUnit, rVal);
+ opRegImm(cUnit, kOpSub, rIdx, 1);
+ opCmpImmBranch(cUnit, kCondGe, rIdx, 0, target);
+#endif
+ } else if (!isRange) {
+ // TUNING: interleave
+ for (unsigned int i = 0; i < dInsn->vA; i++) {
+ RegLocation rlArg = loadValue(cUnit, oatGetSrc(cUnit, mir, i), kCoreReg);
+ storeBaseDisp(cUnit, rRET0,
+ Array::DataOffset(component_size).Int32Value() +
+ i * 4, rlArg.lowReg, kWord);
+ // If the loadValue caused a temp to be allocated, free it
+ if (oatIsTemp(cUnit, rlArg.lowReg)) {
+ oatFreeTemp(cUnit, rlArg.lowReg);
+ }
+ }
+ }
}
void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
- bool isLongOrDouble, bool isObject)
+ bool isLongOrDouble, bool isObject)
{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
- uint32_t fieldIdx = mir->dalvikInsn.vB;
+ int fieldOffset;
+ int ssbIndex;
+ bool isVolatile;
+ bool isReferrersClass;
+ uint32_t fieldIdx = mir->dalvikInsn.vB;
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
+ *cUnit->dex_file, *cUnit->dex_cache,
+ cUnit->code_item, cUnit->method_idx,
+ cUnit->access_flags);
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile, true);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
- int rBase;
- if (isReferrersClass) {
- // Fast path, static storage base is this method's class
- RegLocation rlMethod = loadCurrMethod(cUnit);
- rBase = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rlMethod.lowReg,
- Method::DeclaringClassOffset().Int32Value(), rBase);
- if (oatIsTemp(cUnit, rlMethod.lowReg)) {
- oatFreeTemp(cUnit, rlMethod.lowReg);
- }
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized.
- DCHECK_GE(ssbIndex, 0);
- // May do runtime call so everything to home locations.
- oatFlushAllRegs(cUnit);
- // Using fixed register to sync with possible call to runtime
- // support.
- int rMethod = rARG1;
- oatLockTemp(cUnit, rMethod);
- loadCurrMethodDirect(cUnit, rMethod);
- rBase = rARG0;
- oatLockTemp(cUnit, rBase);
- loadWordDisp(cUnit, rMethod,
- Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- loadWordDisp(cUnit, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() + sizeof(int32_t*) *
- ssbIndex, rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
- loadConstant(cUnit, rARG0, ssbIndex);
- callRuntimeHelperImm(cUnit,
- ENTRYPOINT_OFFSET(pInitializeStaticStorage),
- ssbIndex);
-#if defined(TARGET_MIPS)
- // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
- opRegCopy(cUnit, rBase, rRET0);
-#endif
- LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)skipTarget;
- oatFreeTemp(cUnit, rMethod);
- }
- // rBase now holds static storage base
- if (isLongOrDouble) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
- }
-//FIXME: need to generalize the barrier call
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kST);
- }
- if (isLongOrDouble) {
- storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
- rlSrc.highReg);
- } else {
- storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
- }
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- if (isObject) {
- markGCCard(cUnit, rlSrc.lowReg, rBase);
- }
- oatFreeTemp(cUnit, rBase);
+ bool fastPath =
+ cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
+ fieldOffset, ssbIndex,
+ isReferrersClass, isVolatile,
+ true);
+ if (fastPath && !SLOW_FIELD_PATH) {
+ DCHECK_GE(fieldOffset, 0);
+ int rBase;
+ if (isReferrersClass) {
+ // Fast path, static storage base is this method's class
+ RegLocation rlMethod = loadCurrMethod(cUnit);
+ rBase = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rlMethod.lowReg,
+ Method::DeclaringClassOffset().Int32Value(), rBase);
+ if (oatIsTemp(cUnit, rlMethod.lowReg)) {
+ oatFreeTemp(cUnit, rlMethod.lowReg);
+ }
} else {
- oatFlushAllRegs(cUnit); // Everything to home locations
- int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Static) :
- (isObject ? ENTRYPOINT_OFFSET(pSetObjStatic)
- : ENTRYPOINT_OFFSET(pSet32Static));
- callRuntimeHelperImmRegLocation(cUnit, setterOffset, fieldIdx, rlSrc);
+ // Medium path, static storage base in a different class which
+ // requires checks that the other class is initialized.
+ DCHECK_GE(ssbIndex, 0);
+ // May do runtime call so everything to home locations.
+ oatFlushAllRegs(cUnit);
+ // Using fixed register to sync with possible call to runtime
+ // support.
+ int rMethod = rARG1;
+ oatLockTemp(cUnit, rMethod);
+ loadCurrMethodDirect(cUnit, rMethod);
+ rBase = rARG0;
+ oatLockTemp(cUnit, rBase);
+ loadWordDisp(cUnit, rMethod,
+ Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ rBase);
+ loadWordDisp(cUnit, rBase,
+ Array::DataOffset(sizeof(Object*)).Int32Value() +
+ sizeof(int32_t*) * ssbIndex, rBase);
+ // rBase now points at appropriate static storage base (Class*)
+ // or NULL if not initialized. Check for NULL and call helper if NULL.
+ // TUNING: fast path should fall through
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
+ loadConstant(cUnit, rARG0, ssbIndex);
+ callRuntimeHelperImm(cUnit,
+ ENTRYPOINT_OFFSET(pInitializeStaticStorage),
+ ssbIndex);
+#if defined(TARGET_MIPS)
+ // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
+ opRegCopy(cUnit, rBase, rRET0);
+#endif
+ LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)skipTarget;
+ oatFreeTemp(cUnit, rMethod);
}
+ // rBase now holds static storage base
+ if (isLongOrDouble) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
+ }
+//FIXME: need to generalize the barrier call
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kST);
+ }
+ if (isLongOrDouble) {
+ storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
+ rlSrc.highReg);
+ } else {
+ storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
+ }
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ if (isObject) {
+ markGCCard(cUnit, rlSrc.lowReg, rBase);
+ }
+ oatFreeTemp(cUnit, rBase);
+ } else {
+ oatFlushAllRegs(cUnit); // Everything to home locations
+ int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Static) :
+ (isObject ? ENTRYPOINT_OFFSET(pSetObjStatic)
+ : ENTRYPOINT_OFFSET(pSet32Static));
+ callRuntimeHelperImmRegLocation(cUnit, setterOffset, fieldIdx, rlSrc);
+ }
}
void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- bool isLongOrDouble, bool isObject)
+ bool isLongOrDouble, bool isObject)
{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
- uint32_t fieldIdx = mir->dalvikInsn.vB;
+ int fieldOffset;
+ int ssbIndex;
+ bool isVolatile;
+ bool isReferrersClass;
+ uint32_t fieldIdx = mir->dalvikInsn.vB;
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
+ *cUnit->dex_file, *cUnit->dex_cache,
+ cUnit->code_item, cUnit->method_idx,
+ cUnit->access_flags);
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile,
- false);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
- int rBase;
- if (isReferrersClass) {
- // Fast path, static storage base is this method's class
- RegLocation rlMethod = loadCurrMethod(cUnit);
- rBase = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rlMethod.lowReg,
- Method::DeclaringClassOffset().Int32Value(), rBase);
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized
- DCHECK_GE(ssbIndex, 0);
- // May do runtime call so everything to home locations.
- oatFlushAllRegs(cUnit);
- // Using fixed register to sync with possible call to runtime
- // support
- int rMethod = rARG1;
- oatLockTemp(cUnit, rMethod);
- loadCurrMethodDirect(cUnit, rMethod);
- rBase = rARG0;
- oatLockTemp(cUnit, rBase);
- loadWordDisp(cUnit, rMethod,
- Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- loadWordDisp(cUnit, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() +
- sizeof(int32_t*) * ssbIndex,
- rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
- callRuntimeHelperImm(cUnit,
- ENTRYPOINT_OFFSET(pInitializeStaticStorage),
- ssbIndex);
-#if defined(TARGET_MIPS)
- // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
- opRegCopy(cUnit, rBase, rRET0);
-#endif
- LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)skipTarget;
- oatFreeTemp(cUnit, rMethod);
- }
- // rBase now holds static storage base
- rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
- : oatGetDest(cUnit, mir, 0);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- if (isLongOrDouble) {
- loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
- rlResult.highReg, INVALID_SREG);
- } else {
- loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
- }
- oatFreeTemp(cUnit, rBase);
- if (isLongOrDouble) {
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- storeValue(cUnit, rlDest, rlResult);
- }
+ bool fastPath =
+ cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
+ fieldOffset, ssbIndex,
+ isReferrersClass, isVolatile,
+ false);
+ if (fastPath && !SLOW_FIELD_PATH) {
+ DCHECK_GE(fieldOffset, 0);
+ int rBase;
+ if (isReferrersClass) {
+ // Fast path, static storage base is this method's class
+ RegLocation rlMethod = loadCurrMethod(cUnit);
+ rBase = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rlMethod.lowReg,
+ Method::DeclaringClassOffset().Int32Value(), rBase);
} else {
- oatFlushAllRegs(cUnit); // Everything to home locations
- int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Static) :
- (isObject ? ENTRYPOINT_OFFSET(pGetObjStatic)
- : ENTRYPOINT_OFFSET(pGet32Static));
- callRuntimeHelperImm(cUnit, getterOffset, fieldIdx);
- if (isLongOrDouble) {
- RegLocation rlResult = oatGetReturnWide(cUnit, rlDest.fp);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- RegLocation rlResult = oatGetReturn(cUnit, rlDest.fp);
- storeValue(cUnit, rlDest, rlResult);
- }
+ // Medium path, static storage base in a different class which
+ // requires checks that the other class is initialized
+ DCHECK_GE(ssbIndex, 0);
+ // May do runtime call so everything to home locations.
+ oatFlushAllRegs(cUnit);
+ // Using fixed register to sync with possible call to runtime
+ // support
+ int rMethod = rARG1;
+ oatLockTemp(cUnit, rMethod);
+ loadCurrMethodDirect(cUnit, rMethod);
+ rBase = rARG0;
+ oatLockTemp(cUnit, rBase);
+ loadWordDisp(cUnit, rMethod,
+ Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ rBase);
+ loadWordDisp(cUnit, rBase,
+ Array::DataOffset(sizeof(Object*)).Int32Value() +
+ sizeof(int32_t*) * ssbIndex, rBase);
+ // rBase now points at appropriate static storage base (Class*)
+ // or NULL if not initialized. Check for NULL and call helper if NULL.
+ // TUNING: fast path should fall through
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rBase, 0, NULL);
+ callRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeStaticStorage),
+ ssbIndex);
+#if defined(TARGET_MIPS)
+ // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
+ opRegCopy(cUnit, rBase, rRET0);
+#endif
+ LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)skipTarget;
+ oatFreeTemp(cUnit, rMethod);
}
+ // rBase now holds static storage base
+ rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
+ : oatGetDest(cUnit, mir, 0);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ if (isLongOrDouble) {
+ loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
+ rlResult.highReg, INVALID_SREG);
+ } else {
+ loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
+ }
+ oatFreeTemp(cUnit, rBase);
+ if (isLongOrDouble) {
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ } else {
+ oatFlushAllRegs(cUnit); // Everything to home locations
+ int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Static) :
+ (isObject ? ENTRYPOINT_OFFSET(pGetObjStatic)
+ : ENTRYPOINT_OFFSET(pGet32Static));
+ callRuntimeHelperImm(cUnit, getterOffset, fieldIdx);
+ if (isLongOrDouble) {
+ RegLocation rlResult = oatGetReturnWide(cUnit, rlDest.fp);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ RegLocation rlResult = oatGetReturn(cUnit, rlDest.fp);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ }
}
@@ -828,387 +825,385 @@
void genShowTarget(CompilationUnit* cUnit)
{
#if defined(TARGET_X86)
- UNIMPLEMENTED(WARNING) << "genShowTarget";
+ UNIMPLEMENTED(WARNING) << "genShowTarget";
#else
- LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rINVOKE_TGT, 0, NULL);
- loadWordDisp(cUnit, rSELF,
- ENTRYPOINT_OFFSET(pDebugMe), rINVOKE_TGT);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondNe, rINVOKE_TGT, 0, NULL);
+ loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pDebugMe), rINVOKE_TGT);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
#endif
}
void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
{
- callRuntimeHelperImmImm(cUnit, ENTRYPOINT_OFFSET(pThrowVerificationErrorFromCode),
- mir->dalvikInsn.vA, mir->dalvikInsn.vB);
+ callRuntimeHelperImmImm(cUnit,
+ ENTRYPOINT_OFFSET(pThrowVerificationErrorFromCode),
+ mir->dalvikInsn.vA, mir->dalvikInsn.vB);
}
void handleSuspendLaunchpads(CompilationUnit *cUnit)
{
- LIR** suspendLabel = (LIR **)cUnit->suspendLaunchpads.elemList;
- int numElems = cUnit->suspendLaunchpads.numUsed;
- for (int i = 0; i < numElems; i++) {
- oatResetRegPool(cUnit);
- oatResetDefTracking(cUnit);
- LIR* lab = suspendLabel[i];
- LIR* resumeLab = (LIR*)lab->operands[0];
- cUnit->currentDalvikOffset = lab->operands[1];
- oatAppendLIR(cUnit, lab);
+ LIR** suspendLabel = (LIR **)cUnit->suspendLaunchpads.elemList;
+ int numElems = cUnit->suspendLaunchpads.numUsed;
+ for (int i = 0; i < numElems; i++) {
+ oatResetRegPool(cUnit);
+ oatResetDefTracking(cUnit);
+ LIR* lab = suspendLabel[i];
+ LIR* resumeLab = (LIR*)lab->operands[0];
+ cUnit->currentDalvikOffset = lab->operands[1];
+ oatAppendLIR(cUnit, lab);
#if defined(TARGET_X86)
- opThreadMem(cUnit, kOpBlx,
- ENTRYPOINT_OFFSET(pTestSuspendFromCode));
+ opThreadMem(cUnit, kOpBlx, ENTRYPOINT_OFFSET(pTestSuspendFromCode));
#else
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pTestSuspendFromCode));
- opReg(cUnit, kOpBlx, rTgt);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pTestSuspendFromCode));
+ opReg(cUnit, kOpBlx, rTgt);
#endif
- opUnconditionalBranch(cUnit, resumeLab);
- }
+ opUnconditionalBranch(cUnit, resumeLab);
+ }
}
void handleIntrinsicLaunchpads(CompilationUnit *cUnit)
{
- LIR** intrinsicLabel = (LIR **)cUnit->intrinsicLaunchpads.elemList;
- int numElems = cUnit->intrinsicLaunchpads.numUsed;
- for (int i = 0; i < numElems; i++) {
- oatResetRegPool(cUnit);
- oatResetDefTracking(cUnit);
- LIR* lab = intrinsicLabel[i];
- MIR* mir = (MIR*)lab->operands[0];
- InvokeType type = (InvokeType)lab->operands[1];
- BasicBlock* bb = (BasicBlock*)lab->operands[3];
- cUnit->currentDalvikOffset = mir->offset;
- oatAppendLIR(cUnit, lab);
- genInvoke(cUnit, bb, mir, type, false /* isRange */);
- LIR* resumeLab = (LIR*)lab->operands[2];
- if (resumeLab != NULL) {
- opUnconditionalBranch(cUnit, resumeLab);
- }
+ LIR** intrinsicLabel = (LIR **)cUnit->intrinsicLaunchpads.elemList;
+ int numElems = cUnit->intrinsicLaunchpads.numUsed;
+ for (int i = 0; i < numElems; i++) {
+ oatResetRegPool(cUnit);
+ oatResetDefTracking(cUnit);
+ LIR* lab = intrinsicLabel[i];
+ MIR* mir = (MIR*)lab->operands[0];
+ InvokeType type = (InvokeType)lab->operands[1];
+ BasicBlock* bb = (BasicBlock*)lab->operands[3];
+ cUnit->currentDalvikOffset = mir->offset;
+ oatAppendLIR(cUnit, lab);
+ genInvoke(cUnit, bb, mir, type, false /* isRange */);
+ LIR* resumeLab = (LIR*)lab->operands[2];
+ if (resumeLab != NULL) {
+ opUnconditionalBranch(cUnit, resumeLab);
}
+ }
}
void handleThrowLaunchpads(CompilationUnit *cUnit)
{
- LIR** throwLabel = (LIR **)cUnit->throwLaunchpads.elemList;
- int numElems = cUnit->throwLaunchpads.numUsed;
- for (int i = 0; i < numElems; i++) {
- oatResetRegPool(cUnit);
- oatResetDefTracking(cUnit);
- LIR* lab = throwLabel[i];
- cUnit->currentDalvikOffset = lab->operands[1];
- oatAppendLIR(cUnit, lab);
- int funcOffset = 0;
- int v1 = lab->operands[2];
- int v2 = lab->operands[3];
- switch (lab->operands[0]) {
- case kThrowNullPointer:
- funcOffset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
- break;
- case kThrowArrayBounds:
- if (v2 != rARG0) {
- opRegCopy(cUnit, rARG0, v1);
- opRegCopy(cUnit, rARG1, v2);
- } else {
- if (v1 == rARG1) {
+ LIR** throwLabel = (LIR **)cUnit->throwLaunchpads.elemList;
+ int numElems = cUnit->throwLaunchpads.numUsed;
+ for (int i = 0; i < numElems; i++) {
+ oatResetRegPool(cUnit);
+ oatResetDefTracking(cUnit);
+ LIR* lab = throwLabel[i];
+ cUnit->currentDalvikOffset = lab->operands[1];
+ oatAppendLIR(cUnit, lab);
+ int funcOffset = 0;
+ int v1 = lab->operands[2];
+ int v2 = lab->operands[3];
+ switch (lab->operands[0]) {
+ case kThrowNullPointer:
+ funcOffset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ break;
+ case kThrowArrayBounds:
+ if (v2 != rARG0) {
+ opRegCopy(cUnit, rARG0, v1);
+ opRegCopy(cUnit, rARG1, v2);
+ } else {
+ if (v1 == rARG1) {
#if defined(TARGET_ARM)
- int rTmp = r12;
+ int rTmp = r12;
#else
- int rTmp = oatAllocTemp(cUnit);
+ int rTmp = oatAllocTemp(cUnit);
#endif
- opRegCopy(cUnit, rTmp, v1);
- opRegCopy(cUnit, rARG1, v2);
- opRegCopy(cUnit, rARG0, rTmp);
- } else {
- opRegCopy(cUnit, rARG1, v2);
- opRegCopy(cUnit, rARG0, v1);
- }
- }
- funcOffset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
- break;
- case kThrowDivZero:
- funcOffset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
- break;
- case kThrowVerificationError:
- loadConstant(cUnit, rARG0, v1);
- loadConstant(cUnit, rARG1, v2);
- funcOffset =
- ENTRYPOINT_OFFSET(pThrowVerificationErrorFromCode);
- break;
- case kThrowNoSuchMethod:
- opRegCopy(cUnit, rARG0, v1);
- funcOffset =
- ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
- break;
- case kThrowStackOverflow:
- funcOffset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
- // Restore stack alignment
-#if !defined(TARGET_X86)
- opRegImm(cUnit, kOpAdd, rSP, (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
-#else
- opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
-#endif
- break;
- default:
- LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
+ opRegCopy(cUnit, rTmp, v1);
+ opRegCopy(cUnit, rARG1, v2);
+ opRegCopy(cUnit, rARG0, rTmp);
+ } else {
+ opRegCopy(cUnit, rARG1, v2);
+ opRegCopy(cUnit, rARG0, v1);
+ }
}
- oatClobberCalleeSave(cUnit);
+ funcOffset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ break;
+ case kThrowDivZero:
+ funcOffset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ break;
+ case kThrowVerificationError:
+ loadConstant(cUnit, rARG0, v1);
+ loadConstant(cUnit, rARG1, v2);
+ funcOffset =
+ ENTRYPOINT_OFFSET(pThrowVerificationErrorFromCode);
+ break;
+ case kThrowNoSuchMethod:
+ opRegCopy(cUnit, rARG0, v1);
+ funcOffset =
+ ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+ break;
+ case kThrowStackOverflow:
+ funcOffset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ // Restore stack alignment
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, funcOffset);
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ opRegImm(cUnit, kOpAdd, rSP,
+ (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
#else
- opThreadMem(cUnit, kOpBlx, funcOffset);
+ opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
#endif
+ break;
+ default:
+ LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
}
+ oatClobberCalleeSave(cUnit);
+#if !defined(TARGET_X86)
+ int rTgt = loadHelper(cUnit, funcOffset);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
+#else
+ opThreadMem(cUnit, kOpBlx, funcOffset);
+#endif
+ }
}
/* Needed by the Assembler */
void oatSetupResourceMasks(LIR* lir)
{
- setupResourceMasks(lir);
+ setupResourceMasks(lir);
}
bool fastInstance(CompilationUnit* cUnit, uint32_t fieldIdx,
int& fieldOffset, bool& isVolatile, bool isPut)
{
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
- return cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
- fieldOffset, isVolatile, isPut);
+ OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
+ *cUnit->dex_file, *cUnit->dex_cache,
+ cUnit->code_item, cUnit->method_idx,
+ cUnit->access_flags);
+ return cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
+ fieldOffset, isVolatile, isPut);
}
void genIGet(CompilationUnit* cUnit, MIR* mir, OpSize size,
RegLocation rlDest, RegLocation rlObj,
- bool isLongOrDouble, bool isObject)
+ bool isLongOrDouble, bool isObject)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
+ int fieldOffset;
+ bool isVolatile;
+ uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
- false);
+ bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
- if (fastPath && !SLOW_FIELD_PATH) {
- RegLocation rlResult;
- RegisterClass regClass = oatRegClassBySize(size);
- DCHECK_GE(fieldOffset, 0);
- rlObj = loadValue(cUnit, rlObj, kCoreReg);
- if (isLongOrDouble) {
- DCHECK(rlDest.wide);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
+ if (fastPath && !SLOW_FIELD_PATH) {
+ RegLocation rlResult;
+ RegisterClass regClass = oatRegClassBySize(size);
+ DCHECK_GE(fieldOffset, 0);
+ rlObj = loadValue(cUnit, rlObj, kCoreReg);
+ if (isLongOrDouble) {
+ DCHECK(rlDest.wide);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
#if defined(TARGET_X86)
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
- loadBaseDispWide(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
- rlResult.highReg, rlObj.sRegLow);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
+ loadBaseDispWide(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
+ rlResult.highReg, rlObj.sRegLow);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
#else
- int regPtr = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
- loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- oatFreeTemp(cUnit, regPtr);
+ int regPtr = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+ loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ oatFreeTemp(cUnit, regPtr);
#endif
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
- loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
- kWord, rlObj.sRegLow);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- storeValue(cUnit, rlDest, rlResult);
- }
+ storeValueWide(cUnit, rlDest, rlResult);
} else {
- int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Instance) :
- (isObject ? ENTRYPOINT_OFFSET(pGetObjInstance)
- : ENTRYPOINT_OFFSET(pGet32Instance));
- callRuntimeHelperImmRegLocation(cUnit, getterOffset, fieldIdx, rlObj);
- if (isLongOrDouble) {
- RegLocation rlResult = oatGetReturnWide(cUnit, rlDest.fp);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- RegLocation rlResult = oatGetReturn(cUnit, rlDest.fp);
- storeValue(cUnit, rlDest, rlResult);
- }
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
+ loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
+ kWord, rlObj.sRegLow);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ storeValue(cUnit, rlDest, rlResult);
}
+ } else {
+ int getterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pGet64Instance) :
+ (isObject ? ENTRYPOINT_OFFSET(pGetObjInstance)
+ : ENTRYPOINT_OFFSET(pGet32Instance));
+ callRuntimeHelperImmRegLocation(cUnit, getterOffset, fieldIdx, rlObj);
+ if (isLongOrDouble) {
+ RegLocation rlResult = oatGetReturnWide(cUnit, rlDest.fp);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ RegLocation rlResult = oatGetReturn(cUnit, rlDest.fp);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ }
}
void genIPut(CompilationUnit* cUnit, MIR* mir, OpSize size, RegLocation rlSrc,
- RegLocation rlObj, bool isLongOrDouble, bool isObject)
+ RegLocation rlObj, bool isLongOrDouble, bool isObject)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
+ int fieldOffset;
+ bool isVolatile;
+ uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
- true);
- if (fastPath && !SLOW_FIELD_PATH) {
- RegisterClass regClass = oatRegClassBySize(size);
- DCHECK_GE(fieldOffset, 0);
- rlObj = loadValue(cUnit, rlObj, kCoreReg);
- if (isLongOrDouble) {
- int regPtr;
- rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
- regPtr = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kST);
- }
- storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- oatFreeTemp(cUnit, regPtr);
- } else {
- rlSrc = loadValue(cUnit, rlSrc, regClass);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kST);
- }
- storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, kWord);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- if (isObject) {
- markGCCard(cUnit, rlSrc.lowReg, rlObj.lowReg);
- }
- }
+ bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
+ true);
+ if (fastPath && !SLOW_FIELD_PATH) {
+ RegisterClass regClass = oatRegClassBySize(size);
+ DCHECK_GE(fieldOffset, 0);
+ rlObj = loadValue(cUnit, rlObj, kCoreReg);
+ if (isLongOrDouble) {
+ int regPtr;
+ rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
+ regPtr = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kST);
+ }
+ storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ oatFreeTemp(cUnit, regPtr);
} else {
- int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Instance) :
- (isObject ? ENTRYPOINT_OFFSET(pSetObjInstance)
- : ENTRYPOINT_OFFSET(pSet32Instance));
- callRuntimeHelperImmRegLocationRegLocation(cUnit, setterOffset,
- fieldIdx, rlObj, rlSrc);
+ rlSrc = loadValue(cUnit, rlSrc, regClass);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kST);
+ }
+ storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, kWord);
+ if (isVolatile) {
+ oatGenMemBarrier(cUnit, kSY);
+ }
+ if (isObject) {
+ markGCCard(cUnit, rlSrc.lowReg, rlObj.lowReg);
+ }
}
+ } else {
+ int setterOffset = isLongOrDouble ? ENTRYPOINT_OFFSET(pSet64Instance) :
+ (isObject ? ENTRYPOINT_OFFSET(pSetObjInstance)
+ : ENTRYPOINT_OFFSET(pSet32Instance));
+ callRuntimeHelperImmRegLocationRegLocation(cUnit, setterOffset,
+ fieldIdx, rlObj, rlSrc);
+ }
}
void genConstClass(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- uint32_t type_idx = mir->dalvikInsn.vB;
- RegLocation rlMethod = loadCurrMethod(cUnit);
- int resReg = oatAllocTemp(cUnit);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- type_idx)) {
- // Call out to helper which resolves type and verifies access.
- // Resolved type returned in rRET0.
- callRuntimeHelperImmReg(cUnit,
- ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
- type_idx, rlMethod.lowReg);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ RegLocation rlMethod = loadCurrMethod(cUnit);
+ int resReg = oatAllocTemp(cUnit);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
+ cUnit->dex_cache,
+ *cUnit->dex_file,
+ type_idx)) {
+ // Call out to helper which resolves type and verifies access.
+ // Resolved type returned in rRET0.
+ callRuntimeHelperImmReg(cUnit,
+ ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, rlMethod.lowReg);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
+ } else {
+ // We don't need access checks, load type from dex cache
+ int32_t dex_cache_offset =
+ Method::DexCacheResolvedTypesOffset().Int32Value();
+ loadWordDisp(cUnit, rlMethod.lowReg, dex_cache_offset, resReg);
+ int32_t offset_of_type =
+ Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
+ * type_idx);
+ loadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg);
+ if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(cUnit->dex_cache,
+ type_idx) || SLOW_TYPE_PATH) {
+ // Slow path, at runtime test if type is null and if so initialize
+ oatFlushAllRegs(cUnit);
+ LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rlResult.lowReg, 0, NULL);
+ // Resolved, store and hop over following code
+ storeValue(cUnit, rlDest, rlResult);
+ /*
+ * Because we have stores of the target value on two paths,
+ * clobber temp tracking for the destination using the ssa name
+ */
+ oatClobberSReg(cUnit, rlDest.sRegLow);
+ LIR* branch2 = opUnconditionalBranch(cUnit,0);
+ // TUNING: move slow path to end & remove unconditional branch
+ LIR* target1 = newLIR0(cUnit, kPseudoTargetLabel);
+ // Call out to helper, which will return resolved type in rARG0
+ callRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
+ type_idx, rlMethod.lowReg);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
+ /*
+ * Because we have stores of the target value on two paths,
+ * clobber temp tracking for the destination using the ssa name
+ */
+ oatClobberSReg(cUnit, rlDest.sRegLow);
+ // Rejoin code paths
+ LIR* target2 = newLIR0(cUnit, kPseudoTargetLabel);
+ branch1->target = (LIR*)target1;
+ branch2->target = (LIR*)target2;
} else {
- // We're don't need access checks, load type from dex cache
- int32_t dex_cache_offset =
- Method::DexCacheResolvedTypesOffset().Int32Value();
- loadWordDisp(cUnit, rlMethod.lowReg, dex_cache_offset, resReg);
- int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
- * type_idx);
- loadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(cUnit->dex_cache,
- type_idx) || SLOW_TYPE_PATH) {
- // Slow path, at runtime test if type is null and if so initialize
- oatFlushAllRegs(cUnit);
- LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rlResult.lowReg, 0,
- NULL);
- // Resolved, store and hop over following code
- storeValue(cUnit, rlDest, rlResult);
- /*
- * Because we have stores of the target value on two paths,
- * clobber temp tracking for the destination using the ssa name
- */
- oatClobberSReg(cUnit, rlDest.sRegLow);
- LIR* branch2 = opUnconditionalBranch(cUnit,0);
- // TUNING: move slow path to end & remove unconditional branch
- LIR* target1 = newLIR0(cUnit, kPseudoTargetLabel);
- // Call out to helper, which will return resolved type in rARG0
- callRuntimeHelperImmReg(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
- type_idx, rlMethod.lowReg);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
- /*
- * Because we have stores of the target value on two paths,
- * clobber temp tracking for the destination using the ssa name
- */
- oatClobberSReg(cUnit, rlDest.sRegLow);
- // Rejoin code paths
- LIR* target2 = newLIR0(cUnit, kPseudoTargetLabel);
- branch1->target = (LIR*)target1;
- branch2->target = (LIR*)target2;
- } else {
- // Fast path, we're done - just store result
- storeValue(cUnit, rlDest, rlResult);
- }
+ // Fast path, we're done - just store result
+ storeValue(cUnit, rlDest, rlResult);
}
+ }
}
void genConstString(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc)
+ RegLocation rlSrc)
{
- /* NOTE: Most strings should be available at compile time */
- uint32_t string_idx = mir->dalvikInsn.vB;
- int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
- (sizeof(String*) * string_idx);
- if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache(
- cUnit->dex_cache, string_idx) || SLOW_STRING_PATH) {
- // slow path, resolve string if not in dex cache
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Using explicit registers
- loadCurrMethodDirect(cUnit, rARG2);
- loadWordDisp(cUnit, rARG2,
- Method::DexCacheStringsOffset().Int32Value(), rARG0);
- // Might call out to helper, which will return resolved string in rRET0
+ /* NOTE: Most strings should be available at compile time */
+ uint32_t string_idx = mir->dalvikInsn.vB;
+ int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
+ (sizeof(String*) * string_idx);
+ if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache(
+ cUnit->dex_cache, string_idx) || SLOW_STRING_PATH) {
+ // slow path, resolve string if not in dex cache
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Using explicit registers
+ loadCurrMethodDirect(cUnit, rARG2);
+ loadWordDisp(cUnit, rARG2,
+ Method::DexCacheStringsOffset().Int32Value(), rARG0);
+ // Might call out to helper, which will return resolved string in rRET0
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode));
#endif
- loadWordDisp(cUnit, rRET0, offset_of_string, rARG0);
- loadConstant(cUnit, rARG1, string_idx);
+ loadWordDisp(cUnit, rRET0, offset_of_string, rARG0);
+ loadConstant(cUnit, rARG1, string_idx);
#if defined(TARGET_ARM)
- opRegImm(cUnit, kOpCmp, rRET0, 0); // Is resolved?
- genBarrier(cUnit);
- // For testing, always force through helper
- if (!EXERCISE_SLOWEST_STRING_PATH) {
- opIT(cUnit, kArmCondEq, "T");
- }
- opRegCopy(cUnit, rARG0, rARG2); // .eq
- opReg(cUnit, kOpBlx, rTgt); // .eq, helper(Method*, string_idx)
- oatFreeTemp(cUnit, rTgt);
-#elif defined(TARGET_MIPS)
- LIR* branch = opCmpImmBranch(cUnit, kCondNe, rRET0, 0, NULL);
- opRegCopy(cUnit, rARG0, rARG2); // .eq
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branch->target = target;
-#else
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode),
- rARG2, rARG1);
-#endif
- genBarrier(cUnit);
- storeValue(cUnit, rlDest, oatGetReturn(cUnit, false));
- } else {
- RegLocation rlMethod = loadCurrMethod(cUnit);
- int resReg = oatAllocTemp(cUnit);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadWordDisp(cUnit, rlMethod.lowReg,
- Method::DexCacheStringsOffset().Int32Value(), resReg);
- loadWordDisp(cUnit, resReg, offset_of_string, rlResult.lowReg);
- storeValue(cUnit, rlDest, rlResult);
+ opRegImm(cUnit, kOpCmp, rRET0, 0); // Is resolved?
+ genBarrier(cUnit);
+ // For testing, always force through helper
+ if (!EXERCISE_SLOWEST_STRING_PATH) {
+ opIT(cUnit, kArmCondEq, "T");
}
+ opRegCopy(cUnit, rARG0, rARG2); // .eq
+ opReg(cUnit, kOpBlx, rTgt); // .eq, helper(Method*, string_idx)
+ oatFreeTemp(cUnit, rTgt);
+#elif defined(TARGET_MIPS)
+ LIR* branch = opCmpImmBranch(cUnit, kCondNe, rRET0, 0, NULL);
+ opRegCopy(cUnit, rARG0, rARG2); // .eq
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branch->target = target;
+#else
+ callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pResolveStringFromCode),
+ rARG2, rARG1);
+#endif
+ genBarrier(cUnit);
+ storeValue(cUnit, rlDest, oatGetReturn(cUnit, false));
+ } else {
+ RegLocation rlMethod = loadCurrMethod(cUnit);
+ int resReg = oatAllocTemp(cUnit);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadWordDisp(cUnit, rlMethod.lowReg,
+ Method::DexCacheStringsOffset().Int32Value(), resReg);
+ loadWordDisp(cUnit, resReg, offset_of_string, rlResult.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+ }
}
/*
@@ -1217,185 +1212,185 @@
*/
void genNewInstance(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest)
{
- oatFlushAllRegs(cUnit); /* Everything to home location */
- uint32_t type_idx = mir->dalvikInsn.vB;
- // alloc will always check for resolution, do we also need to verify
- // access because the verifier was unable to?
- int funcOffset;
- if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks(
- cUnit->method_idx, cUnit->dex_cache, *cUnit->dex_file, type_idx)) {
- funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
- } else {
- funcOffset =
- ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
- }
- callRuntimeHelperImmMethod(cUnit, funcOffset, type_idx);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ // alloc will always check for resolution, do we also need to verify
+ // access because the verifier was unable to?
+ int funcOffset;
+ if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks(
+ cUnit->method_idx, cUnit->dex_cache, *cUnit->dex_file, type_idx)) {
+ funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ } else {
+ funcOffset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ }
+ callRuntimeHelperImmMethod(cUnit, funcOffset, type_idx);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
}
void genThrow(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- callRuntimeHelperRegLocation(cUnit, ENTRYPOINT_OFFSET(pDeliverException), rlSrc);
+ oatFlushAllRegs(cUnit);
+ callRuntimeHelperRegLocation(cUnit, ENTRYPOINT_OFFSET(pDeliverException),
+ rlSrc);
}
void genInstanceof(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- // May generate a call - use explicit registers
- oatLockCallTemps(cUnit);
- uint32_t type_idx = mir->dalvikInsn.vC;
- loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method*
- int classReg = rARG2; // rARG2 will hold the Class*
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- type_idx)) {
- // Check we have access to type_idx and if not throw IllegalAccessError,
- // returns Class* in rARG0
- callRuntimeHelperImm(cUnit,
- ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
- type_idx);
- opRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
- } else {
- // Load dex cache entry into classReg (rARG2)
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
- loadWordDisp(cUnit, rARG1,
- Method::DexCacheResolvedTypesOffset().Int32Value(),
- classReg);
- int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
- * type_idx);
- loadWordDisp(cUnit, classReg, offset_of_type, classReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
- cUnit->dex_cache, type_idx)) {
- // Need to test presence of type in dex cache at runtime
- LIR* hopBranch = opCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
- // Not resolved
- // Call out to helper, which will return resolved type in rRET0
- callRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
- type_idx);
- opRegCopy(cUnit, rARG2, rRET0); // Align usage with fast path
- loadValueDirectFixed(cUnit, rlSrc, rARG0); /* reload Ref */
- // Rejoin code paths
- LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
- hopBranch->target = (LIR*)hopTarget;
- }
+ oatFlushAllRegs(cUnit);
+ // May generate a call - use explicit registers
+ oatLockCallTemps(cUnit);
+ uint32_t type_idx = mir->dalvikInsn.vC;
+ loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method*
+ int classReg = rARG2; // rARG2 will hold the Class*
+ if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
+ cUnit->dex_cache,
+ *cUnit->dex_file,
+ type_idx)) {
+ // Check we have access to type_idx and if not throw IllegalAccessError,
+ // returns Class* in rARG0
+ callRuntimeHelperImm(cUnit,
+ ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx);
+ opRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
+ } else {
+ // Load dex cache entry into classReg (rARG2)
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
+ loadWordDisp(cUnit, rARG1,
+ Method::DexCacheResolvedTypesOffset().Int32Value(), classReg);
+ int32_t offset_of_type =
+ Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
+ * type_idx);
+ loadWordDisp(cUnit, classReg, offset_of_type, classReg);
+ if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
+ cUnit->dex_cache, type_idx)) {
+ // Need to test presence of type in dex cache at runtime
+ LIR* hopBranch = opCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
+ // Not resolved
+ // Call out to helper, which will return resolved type in rRET0
+ callRuntimeHelperImm(cUnit, ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
+ type_idx);
+ opRegCopy(cUnit, rARG2, rRET0); // Align usage with fast path
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); /* reload Ref */
+ // Rejoin code paths
+ LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
+ hopBranch->target = (LIR*)hopTarget;
}
- /* rARG0 is ref, rARG2 is class. If ref==null, use directly as bool result */
- LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rARG0, 0, NULL);
- /* load object->klass_ */
- DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
- /* rARG0 is ref, rARG1 is ref->klass_, rARG2 is class */
+ }
+ /* rARG0 is ref, rARG2 is class. If ref==null, use directly as bool result */
+ LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rARG0, 0, NULL);
+ /* load object->klass_ */
+ DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
+ loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
+ /* rARG0 is ref, rARG1 is ref->klass_, rARG2 is class */
#if defined(TARGET_ARM)
- /* Uses conditional nullification */
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
- opRegReg(cUnit, kOpCmp, rARG1, rARG2); // Same?
- opIT(cUnit, kArmCondEq, "EE"); // if-convert the test
- loadConstant(cUnit, rARG0, 1); // .eq case - load true
- opRegCopy(cUnit, rARG0, rARG2); // .ne case - arg0 <= class
- opReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
- oatFreeTemp(cUnit, rTgt);
+ /* Uses conditional nullification */
+ int rTgt = loadHelper(cUnit,
+ ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ opRegReg(cUnit, kOpCmp, rARG1, rARG2); // Same?
+ opIT(cUnit, kArmCondEq, "EE"); // if-convert the test
+ loadConstant(cUnit, rARG0, 1); // .eq case - load true
+ opRegCopy(cUnit, rARG0, rARG2); // .ne case - arg0 <= class
+ opReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
+ oatFreeTemp(cUnit, rTgt);
#else
- /* Uses branchovers */
- loadConstant(cUnit, rARG0, 1); // assume true
- LIR* branchover = opCmpBranch(cUnit, kCondEq, rARG1, rARG2, NULL);
+ /* Uses branchovers */
+ loadConstant(cUnit, rARG0, 1); // assume true
+ LIR* branchover = opCmpBranch(cUnit, kCondEq, rARG1, rARG2, NULL);
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
- opRegCopy(cUnit, rARG0, rARG2); // .ne case - arg0 <= class
- opReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
- oatFreeTemp(cUnit, rTgt);
+ int rTgt = loadHelper(cUnit,
+ ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ opRegCopy(cUnit, rARG0, rARG2); // .ne case - arg0 <= class
+ opReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
+ oatFreeTemp(cUnit, rTgt);
#else
- opRegCopy(cUnit, rARG0, rARG2);
- opThreadMem(cUnit, kOpBlx,
- ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ opRegCopy(cUnit, rARG0, rARG2);
+ opThreadMem(cUnit, kOpBlx,
+ ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
#endif
#endif
- oatClobberCalleeSave(cUnit);
- /* branch targets here */
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
- branch1->target = target;
+ oatClobberCalleeSave(cUnit);
+ /* branch targets here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
+ branch1->target = target;
#if !defined(TARGET_ARM)
- branchover->target = target;
+ branchover->target = target;
#endif
}
void genCheckCast(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- // May generate a call - use explicit registers
- oatLockCallTemps(cUnit);
- uint32_t type_idx = mir->dalvikInsn.vB;
- loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method*
- int classReg = rARG2; // rARG2 will hold the Class*
- if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- type_idx)) {
- // Check we have access to type_idx and if not throw IllegalAccessError,
- // returns Class* in rRET0
- // InitializeTypeAndVerifyAccess(idx, method)
- callRuntimeHelperImmReg(cUnit,
- ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
- type_idx, rARG1);
- opRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
- } else {
- // Load dex cache entry into classReg (rARG2)
- loadWordDisp(cUnit, rARG1,
- Method::DexCacheResolvedTypesOffset().Int32Value(),
- classReg);
- int32_t offset_of_type =
- Array::DataOffset(sizeof(Class*)).Int32Value() +
- (sizeof(Class*) * type_idx);
- loadWordDisp(cUnit, classReg, offset_of_type, classReg);
- if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
- cUnit->dex_cache, type_idx)) {
- // Need to test presence of type in dex cache at runtime
- LIR* hopBranch = opCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
- // Not resolved
- // Call out to helper, which will return resolved type in rARG0
- // InitializeTypeFromCode(idx, method)
- callRuntimeHelperImmReg(cUnit,
- ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
- type_idx, rARG1);
- opRegCopy(cUnit, classReg, rARG0); // Align usage with fast path
- // Rejoin code paths
- LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
- hopBranch->target = (LIR*)hopTarget;
- }
+ oatFlushAllRegs(cUnit);
+ // May generate a call - use explicit registers
+ oatLockCallTemps(cUnit);
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method*
+ int classReg = rARG2; // rARG2 will hold the Class*
+ if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
+ cUnit->dex_cache,
+ *cUnit->dex_file,
+ type_idx)) {
+ // Check we have access to type_idx and if not throw IllegalAccessError,
+ // returns Class* in rRET0
+ // InitializeTypeAndVerifyAccess(idx, method)
+ callRuntimeHelperImmReg(cUnit,
+ ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, rARG1);
+ opRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
+ } else {
+ // Load dex cache entry into classReg (rARG2)
+ loadWordDisp(cUnit, rARG1,
+ Method::DexCacheResolvedTypesOffset().Int32Value(), classReg);
+ int32_t offset_of_type =
+ Array::DataOffset(sizeof(Class*)).Int32Value() +
+ (sizeof(Class*) * type_idx);
+ loadWordDisp(cUnit, classReg, offset_of_type, classReg);
+ if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
+ cUnit->dex_cache, type_idx)) {
+ // Need to test presence of type in dex cache at runtime
+ LIR* hopBranch = opCmpImmBranch(cUnit, kCondNe, classReg, 0, NULL);
+ // Not resolved
+ // Call out to helper, which will return resolved type in rARG0
+ // InitializeTypeFromCode(idx, method)
+ callRuntimeHelperImmReg(cUnit,
+ ENTRYPOINT_OFFSET(pInitializeTypeFromCode),
+ type_idx, rARG1);
+ opRegCopy(cUnit, classReg, rARG0); // Align usage with fast path
+ // Rejoin code paths
+ LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
+ hopBranch->target = (LIR*)hopTarget;
}
- // At this point, classReg (rARG2) has class
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
- /* Null is OK - continue */
- LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rARG0, 0, NULL);
- /* load object->klass_ */
- DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
- loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
- /* rARG1 now contains object->klass_ */
+ }
+ // At this point, classReg (rARG2) has class
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
+ /* Null is OK - continue */
+ LIR* branch1 = opCmpImmBranch(cUnit, kCondEq, rARG0, 0, NULL);
+ /* load object->klass_ */
+ DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
+ loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
+ /* rARG1 now contains object->klass_ */
#if defined(TARGET_MIPS) || defined(TARGET_X86)
- LIR* branch2 = opCmpBranch(cUnit, kCondEq, rARG1, classReg, NULL);
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode),
- rARG1, rARG2);
+ LIR* branch2 = opCmpBranch(cUnit, kCondEq, rARG1, classReg, NULL);
+ callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode),
+ rARG1, rARG2);
#else // defined(TARGET_ARM)
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode));
- opRegReg(cUnit, kOpCmp, rARG1, classReg);
- LIR* branch2 = opCondBranch(cUnit, kCondEq, NULL); /* If eq, trivial yes */
- opRegCopy(cUnit, rARG0, rARG1);
- opRegCopy(cUnit, rARG1, rARG2);
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pCheckCastFromCode));
+ opRegReg(cUnit, kOpCmp, rARG1, classReg);
+ LIR* branch2 = opCondBranch(cUnit, kCondEq, NULL); /* If eq, trivial yes */
+ opRegCopy(cUnit, rARG0, rARG1);
+ opRegCopy(cUnit, rARG1, rARG2);
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
#endif
- /* branch target here */
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branch1->target = target;
- branch2->target = target;
+ /* branch target here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branch1->target = target;
+ branch2->target = target;
}
/*
@@ -1403,72 +1398,71 @@
*
*/
void genArrayObjPut(CompilationUnit* cUnit, MIR* mir, RegLocation rlArray,
- RegLocation rlIndex, RegLocation rlSrc, int scale)
+ RegLocation rlIndex, RegLocation rlSrc, int scale)
{
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset = Array::DataOffset(sizeof(Object*)).Int32Value();
+ int lenOffset = Array::LengthOffset().Int32Value();
+ int dataOffset = Array::DataOffset(sizeof(Object*)).Int32Value();
- oatFlushAllRegs(cUnit); // Use explicit registers
- oatLockCallTemps(cUnit);
+ oatFlushAllRegs(cUnit); // Use explicit registers
+ oatLockCallTemps(cUnit);
- int rValue = rARG0; // Register holding value
- int rArrayClass = rARG1; // Register holding array's Class
- int rArray = rARG2; // Register holding array
- int rIndex = rARG3; // Register holding index into array
+ int rValue = rARG0; // Register holding value
+ int rArrayClass = rARG1; // Register holding array's Class
+ int rArray = rARG2; // Register holding array
+ int rIndex = rARG3; // Register holding index into array
- loadValueDirectFixed(cUnit, rlArray, rArray); // Grab array
- loadValueDirectFixed(cUnit, rlSrc, rValue); // Grab value
- loadValueDirectFixed(cUnit, rlIndex, rIndex); // Grab index
+ loadValueDirectFixed(cUnit, rlArray, rArray); // Grab array
+ loadValueDirectFixed(cUnit, rlSrc, rValue); // Grab value
+ loadValueDirectFixed(cUnit, rlIndex, rIndex); // Grab index
- genNullCheck(cUnit, rlArray.sRegLow, rArray, mir); // NPE?
+ genNullCheck(cUnit, rlArray.sRegLow, rArray, mir); // NPE?
- // Store of null?
- LIR* null_value_check = opCmpImmBranch(cUnit, kCondEq, rValue, 0, NULL);
+ // Store of null?
+ LIR* null_value_check = opCmpImmBranch(cUnit, kCondEq, rValue, 0, NULL);
- // Get the array's class.
- loadWordDisp(cUnit, rArray, Object::ClassOffset().Int32Value(), rArrayClass);
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode),
- rValue, rArrayClass);
- // Redo loadValues in case they didn't survive the call.
- loadValueDirectFixed(cUnit, rlArray, rArray); // Reload array
- loadValueDirectFixed(cUnit, rlIndex, rIndex); // Reload index
- loadValueDirectFixed(cUnit, rlSrc, rValue); // Reload value
- rArrayClass = INVALID_REG;
+ // Get the array's class.
+ loadWordDisp(cUnit, rArray, Object::ClassOffset().Int32Value(), rArrayClass);
+ callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode),
+ rValue, rArrayClass);
+ // Redo loadValues in case they didn't survive the call.
+ loadValueDirectFixed(cUnit, rlArray, rArray); // Reload array
+ loadValueDirectFixed(cUnit, rlIndex, rIndex); // Reload index
+ loadValueDirectFixed(cUnit, rlSrc, rValue); // Reload value
+ rArrayClass = INVALID_REG;
- // Branch here if value to be stored == null
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- null_value_check->target = target;
+ // Branch here if value to be stored == null
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ null_value_check->target = target;
#if defined(TARGET_X86)
- // make an extra temp available for card mark below
- oatFreeTemp(cUnit, rARG1);
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- genRegMemCheck(cUnit, kCondUge, rIndex, rArray,
- lenOffset, mir, kThrowArrayBounds);
- }
- storeBaseIndexedDisp(cUnit, NULL, rArray, rIndex, scale,
- dataOffset, rValue, INVALID_REG, kWord,
- INVALID_SREG);
+ // make an extra temp available for card mark below
+ oatFreeTemp(cUnit, rARG1);
+ if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
+ genRegMemCheck(cUnit, kCondUge, rIndex, rArray,
+ lenOffset, mir, kThrowArrayBounds);
+ }
+ storeBaseIndexedDisp(cUnit, NULL, rArray, rIndex, scale,
+ dataOffset, rValue, INVALID_REG, kWord, INVALID_SREG);
#else
- bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = rARG1;
- loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen); // Get len
- }
- /* rPtr -> array data */
- int rPtr = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpAdd, rPtr, rArray, dataOffset);
- if (needsRangeCheck) {
- genRegRegCheck(cUnit, kCondCs, rIndex, regLen, mir,
- kThrowArrayBounds);
- }
- storeBaseIndexed(cUnit, rPtr, rIndex, rValue, scale, kWord);
- oatFreeTemp(cUnit, rPtr);
+ bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
+ int regLen = INVALID_REG;
+ if (needsRangeCheck) {
+ regLen = rARG1;
+ loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen); // Get len
+ }
+ /* rPtr -> array data */
+ int rPtr = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpAdd, rPtr, rArray, dataOffset);
+ if (needsRangeCheck) {
+ genRegRegCheck(cUnit, kCondCs, rIndex, regLen, mir,
+ kThrowArrayBounds);
+ }
+ storeBaseIndexed(cUnit, rPtr, rIndex, rValue, scale, kWord);
+ oatFreeTemp(cUnit, rPtr);
#endif
- oatFreeTemp(cUnit, rIndex);
- markGCCard(cUnit, rValue, rArray);
+ oatFreeTemp(cUnit, rIndex);
+ markGCCard(cUnit, rValue, rArray);
}
/*
@@ -1478,93 +1472,95 @@
RegLocation rlArray, RegLocation rlIndex,
RegLocation rlDest, int scale)
{
- RegisterClass regClass = oatRegClassBySize(size);
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset;
- RegLocation rlResult;
- rlArray = loadValue(cUnit, rlArray, kCoreReg);
- rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
+ RegisterClass regClass = oatRegClassBySize(size);
+ int lenOffset = Array::LengthOffset().Int32Value();
+ int dataOffset;
+ RegLocation rlResult;
+ rlArray = loadValue(cUnit, rlArray, kCoreReg);
+ rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
- if (size == kLong || size == kDouble) {
- dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
+ if (size == kLong || size == kDouble) {
+ dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
- /* null object? */
- genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);
+ /* null object? */
+ genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);
#if defined(TARGET_X86)
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- genRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg,
- lenOffset, mir, kThrowArrayBounds);
- }
- if ((size == kLong) || (size == kDouble)) {
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
- loadBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset,
- rlResult.lowReg, rlResult.highReg, size, INVALID_SREG);
+ if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
+ genRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg,
+ lenOffset, mir, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+ loadBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale,
+ dataOffset, rlResult.lowReg, rlResult.highReg, size,
+ INVALID_SREG);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
- loadBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset,
- rlResult.lowReg, INVALID_REG, size, INVALID_SREG);
+ loadBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale,
+ dataOffset, rlResult.lowReg, INVALID_REG, size,
+ INVALID_SREG);
- storeValue(cUnit, rlDest, rlResult);
- }
+ storeValue(cUnit, rlDest, rlResult);
+ }
#else
- int regPtr = oatAllocTemp(cUnit);
- bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = oatAllocTemp(cUnit);
- /* Get len */
- loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
- }
- /* regPtr -> array data */
- opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
- oatFreeTemp(cUnit, rlArray.lowReg);
- if ((size == kLong) || (size == kDouble)) {
- if (scale) {
- int rNewIndex = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
- opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
- oatFreeTemp(cUnit, rNewIndex);
- } else {
- opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
- }
- oatFreeTemp(cUnit, rlIndex.lowReg);
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
-
- if (needsRangeCheck) {
- // TODO: change kCondCS to a more meaningful name, is the sense of
- // carry-set/clear flipped?
- genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
- kThrowArrayBounds);
- oatFreeTemp(cUnit, regLen);
- }
- loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
-
- oatFreeTemp(cUnit, regPtr);
- storeValueWide(cUnit, rlDest, rlResult);
+ int regPtr = oatAllocTemp(cUnit);
+ bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
+ int regLen = INVALID_REG;
+ if (needsRangeCheck) {
+ regLen = oatAllocTemp(cUnit);
+ /* Get len */
+ loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+ }
+ /* regPtr -> array data */
+ opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
+ oatFreeTemp(cUnit, rlArray.lowReg);
+ if ((size == kLong) || (size == kDouble)) {
+ if (scale) {
+ int rNewIndex = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
+ opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
+ oatFreeTemp(cUnit, rNewIndex);
} else {
- rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
-
- if (needsRangeCheck) {
- // TODO: change kCondCS to a more meaningful name, is the sense of
- // carry-set/clear flipped?
- genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
- kThrowArrayBounds);
- oatFreeTemp(cUnit, regLen);
- }
- loadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg,
- scale, size);
-
- oatFreeTemp(cUnit, regPtr);
- storeValue(cUnit, rlDest, rlResult);
+ opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
}
+ oatFreeTemp(cUnit, rlIndex.lowReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+
+ if (needsRangeCheck) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
+ kThrowArrayBounds);
+ oatFreeTemp(cUnit, regLen);
+ }
+ loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
+
+ oatFreeTemp(cUnit, regPtr);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
+
+ if (needsRangeCheck) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
+ kThrowArrayBounds);
+ oatFreeTemp(cUnit, regLen);
+ }
+ loadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg,
+ scale, size);
+
+ oatFreeTemp(cUnit, regPtr);
+ storeValue(cUnit, rlDest, rlResult);
+ }
#endif
}
@@ -1576,139 +1572,137 @@
RegLocation rlArray, RegLocation rlIndex,
RegLocation rlSrc, int scale)
{
- RegisterClass regClass = oatRegClassBySize(size);
- int lenOffset = Array::LengthOffset().Int32Value();
- int dataOffset;
+ RegisterClass regClass = oatRegClassBySize(size);
+ int lenOffset = Array::LengthOffset().Int32Value();
+ int dataOffset;
- if (size == kLong || size == kDouble) {
- dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
+ if (size == kLong || size == kDouble) {
+ dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
- rlArray = loadValue(cUnit, rlArray, kCoreReg);
- rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
+ rlArray = loadValue(cUnit, rlArray, kCoreReg);
+ rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
#if !defined(TARGET_X86)
- int regPtr;
- if (oatIsTemp(cUnit, rlArray.lowReg)) {
- oatClobber(cUnit, rlArray.lowReg);
- regPtr = rlArray.lowReg;
- } else {
- regPtr = oatAllocTemp(cUnit);
- opRegCopy(cUnit, regPtr, rlArray.lowReg);
- }
+ int regPtr;
+ if (oatIsTemp(cUnit, rlArray.lowReg)) {
+ oatClobber(cUnit, rlArray.lowReg);
+ regPtr = rlArray.lowReg;
+ } else {
+ regPtr = oatAllocTemp(cUnit);
+ opRegCopy(cUnit, regPtr, rlArray.lowReg);
+ }
#endif
- /* null object? */
- genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);
+ /* null object? */
+ genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);
#if defined(TARGET_X86)
- if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
- /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
- genRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg,
- lenOffset, mir, kThrowArrayBounds);
- }
- if ((size == kLong) || (size == kDouble)) {
- rlSrc = loadValueWide(cUnit, rlSrc, regClass);
- } else {
- rlSrc = loadValue(cUnit, rlSrc, regClass);
- }
- storeBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale, dataOffset,
- rlSrc.lowReg, rlSrc.highReg, size, INVALID_SREG);
+ if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rlIndex >= [rlArray + lenOffset]) goto kThrowArrayBounds */
+ genRegMemCheck(cUnit, kCondUge, rlIndex.lowReg, rlArray.lowReg,
+ lenOffset, mir, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
+ rlSrc = loadValueWide(cUnit, rlSrc, regClass);
+ } else {
+ rlSrc = loadValue(cUnit, rlSrc, regClass);
+ }
+ storeBaseIndexedDisp(cUnit, NULL, rlArray.lowReg, rlIndex.lowReg, scale,
+ dataOffset, rlSrc.lowReg, rlSrc.highReg, size,
+ INVALID_SREG);
#else
- bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
- int regLen = INVALID_REG;
- if (needsRangeCheck) {
- regLen = oatAllocTemp(cUnit);
- //NOTE: max live temps(4) here.
- /* Get len */
- loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
- }
- /* regPtr -> array data */
- opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
- /* at this point, regPtr points to array, 2 live temps */
- if ((size == kLong) || (size == kDouble)) {
- //TUNING: specific wide routine that can handle fp regs
- if (scale) {
- int rNewIndex = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
- opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
- oatFreeTemp(cUnit, rNewIndex);
- } else {
- opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
- }
- rlSrc = loadValueWide(cUnit, rlSrc, regClass);
-
- if (needsRangeCheck) {
- genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
- kThrowArrayBounds);
- oatFreeTemp(cUnit, regLen);
- }
-
- storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
-
- oatFreeTemp(cUnit, regPtr);
+ bool needsRangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
+ int regLen = INVALID_REG;
+ if (needsRangeCheck) {
+ regLen = oatAllocTemp(cUnit);
+ //NOTE: max live temps(4) here.
+ /* Get len */
+ loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
+ }
+ /* regPtr -> array data */
+ opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
+ /* at this point, regPtr points to array, 2 live temps */
+ if ((size == kLong) || (size == kDouble)) {
+ //TUNING: specific wide routine that can handle fp regs
+ if (scale) {
+ int rNewIndex = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
+ opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
+ oatFreeTemp(cUnit, rNewIndex);
} else {
- rlSrc = loadValue(cUnit, rlSrc, regClass);
- if (needsRangeCheck) {
- genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
- kThrowArrayBounds);
- oatFreeTemp(cUnit, regLen);
- }
- storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
- scale, size);
+ opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
}
+ rlSrc = loadValueWide(cUnit, rlSrc, regClass);
+
+ if (needsRangeCheck) {
+ genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
+ kThrowArrayBounds);
+ oatFreeTemp(cUnit, regLen);
+ }
+
+ storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
+
+ oatFreeTemp(cUnit, regPtr);
+ } else {
+ rlSrc = loadValue(cUnit, rlSrc, regClass);
+ if (needsRangeCheck) {
+ genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
+ kThrowArrayBounds);
+ oatFreeTemp(cUnit, regLen);
+ }
+ storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
+ scale, size);
+ }
#endif
}
void genLong3Addr(CompilationUnit* cUnit, MIR* mir, OpKind firstOp,
OpKind secondOp, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- RegLocation rlResult;
+ RegLocation rlResult;
#if defined(TARGET_ARM)
- /*
- * NOTE: This is the one place in the code in which we might have
- * as many as six live temporary registers. There are 5 in the normal
- * set for Arm. Until we have spill capabilities, temporarily add
- * lr to the temp set. It is safe to do this locally, but note that
- * lr is used explicitly elsewhere in the code generator and cannot
- * normally be used as a general temp register.
- */
- oatMarkTemp(cUnit, rLR); // Add lr to the temp pool
- oatFreeTemp(cUnit, rLR); // and make it available
+ /*
+ * NOTE: This is the one place in the code in which we might have
+ * as many as six live temporary registers. There are 5 in the normal
+ * set for Arm. Until we have spill capabilities, temporarily add
+ * lr to the temp set. It is safe to do this locally, but note that
+ * lr is used explicitly elsewhere in the code generator and cannot
+ * normally be used as a general temp register.
+ */
+ oatMarkTemp(cUnit, rLR); // Add lr to the temp pool
+ oatFreeTemp(cUnit, rLR); // and make it available
#endif
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- // The longs may overlap - use intermediate temp if so
- if (rlResult.lowReg == rlSrc1.highReg) {
- int tReg = oatAllocTemp(cUnit);
- opRegCopy(cUnit, tReg, rlSrc1.highReg);
- opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
- rlSrc2.lowReg);
- opRegRegReg(cUnit, secondOp, rlResult.highReg, tReg,
- rlSrc2.highReg);
- oatFreeTemp(cUnit, tReg);
- } else {
- opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
- rlSrc2.lowReg);
- opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg,
- rlSrc2.highReg);
- }
- /*
- * NOTE: If rlDest refers to a frame variable in a large frame, the
- * following storeValueWide might need to allocate a temp register.
- * To further work around the lack of a spill capability, explicitly
- * free any temps from rlSrc1 & rlSrc2 that aren't still live in rlResult.
- * Remove when spill is functional.
- */
- freeRegLocTemps(cUnit, rlResult, rlSrc1);
- freeRegLocTemps(cUnit, rlResult, rlSrc2);
- storeValueWide(cUnit, rlDest, rlResult);
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ // The longs may overlap - use intermediate temp if so
+ if (rlResult.lowReg == rlSrc1.highReg) {
+ int tReg = oatAllocTemp(cUnit);
+ opRegCopy(cUnit, tReg, rlSrc1.highReg);
+ opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, secondOp, rlResult.highReg, tReg, rlSrc2.highReg);
+ oatFreeTemp(cUnit, tReg);
+ } else {
+ opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg,
+ rlSrc2.highReg);
+ }
+ /*
+ * NOTE: If rlDest refers to a frame variable in a large frame, the
+ * following storeValueWide might need to allocate a temp register.
+ * To further work around the lack of a spill capability, explicitly
+ * free any temps from rlSrc1 & rlSrc2 that aren't still live in rlResult.
+ * Remove when spill is functional.
+ */
+ freeRegLocTemps(cUnit, rlResult, rlSrc1);
+ freeRegLocTemps(cUnit, rlResult, rlSrc2);
+ storeValueWide(cUnit, rlDest, rlResult);
#if defined(TARGET_ARM)
- oatClobber(cUnit, rLR);
- oatUnmarkTemp(cUnit, rLR); // Remove lr from the temp pool
+ oatClobber(cUnit, rLR);
+ oatUnmarkTemp(cUnit, rLR); // Remove lr from the temp pool
#endif
}
@@ -1716,168 +1710,165 @@
bool genShiftOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlShift)
{
- int funcOffset;
+ int funcOffset;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pShlLong);
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pShrLong);
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- funcOffset = ENTRYPOINT_OFFSET(pUshrLong);
- break;
- default:
- LOG(FATAL) << "Unexpected case";
- return true;
- }
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlShift);
- RegLocation rlResult = oatGetReturnWide(cUnit, false);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ funcOffset = ENTRYPOINT_OFFSET(pShlLong);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ funcOffset = ENTRYPOINT_OFFSET(pShrLong);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ funcOffset = ENTRYPOINT_OFFSET(pUshrLong);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ return true;
+ }
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlShift);
+ RegLocation rlResult = oatGetReturnWide(cUnit, false);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
bool genArithOpInt(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- OpKind op = kOpBkpt;
- bool callOut = false;
- bool checkZero = false;
- bool unary = false;
- RegLocation rlResult;
- bool shiftOp = false;
- int funcOffset;
- int retReg = rRET0;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::NEG_INT:
- op = kOpNeg;
- unary = true;
- break;
- case Instruction::NOT_INT:
- op = kOpMvn;
- unary = true;
- break;
- case Instruction::ADD_INT:
- case Instruction::ADD_INT_2ADDR:
- op = kOpAdd;
- break;
- case Instruction::SUB_INT:
- case Instruction::SUB_INT_2ADDR:
- op = kOpSub;
- break;
- case Instruction::MUL_INT:
- case Instruction::MUL_INT_2ADDR:
- op = kOpMul;
- break;
- case Instruction::DIV_INT:
- case Instruction::DIV_INT_2ADDR:
- checkZero = true;
- op = kOpDiv;
- callOut = true;
- funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
- retReg = rRET0;
- break;
- /* NOTE: returns in rARG1 */
- case Instruction::REM_INT:
- case Instruction::REM_INT_2ADDR:
- checkZero = true;
- op = kOpRem;
- callOut = true;
- funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
- retReg = rRET1;
- break;
- case Instruction::AND_INT:
- case Instruction::AND_INT_2ADDR:
- op = kOpAnd;
- break;
- case Instruction::OR_INT:
- case Instruction::OR_INT_2ADDR:
- op = kOpOr;
- break;
- case Instruction::XOR_INT:
- case Instruction::XOR_INT_2ADDR:
- op = kOpXor;
- break;
- case Instruction::SHL_INT:
- case Instruction::SHL_INT_2ADDR:
- shiftOp = true;
- op = kOpLsl;
- break;
- case Instruction::SHR_INT:
- case Instruction::SHR_INT_2ADDR:
- shiftOp = true;
- op = kOpAsr;
- break;
- case Instruction::USHR_INT:
- case Instruction::USHR_INT_2ADDR:
- shiftOp = true;
- op = kOpLsr;
- break;
- default:
- LOG(FATAL) << "Invalid word arith op: " <<
- (int)mir->dalvikInsn.opcode;
- }
- if (!callOut) {
- if (unary) {
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegReg(cUnit, op, rlResult.lowReg,
- rlSrc1.lowReg);
- } else {
- if (shiftOp) {
-#if !defined(TARGET_X86)
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- int tReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
-#else
- // X86 doesn't require masking and must use ECX
- loadValueDirectFixed(cUnit, rlSrc2, rCX);
- int tReg = rCX;
-#endif
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegReg(cUnit, op, rlResult.lowReg,
- rlSrc1.lowReg, tReg);
- oatFreeTemp(cUnit, tReg);
- } else {
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegReg(cUnit, op, rlResult.lowReg,
- rlSrc1.lowReg, rlSrc2.lowReg);
- }
- }
- storeValue(cUnit, rlDest, rlResult);
+ OpKind op = kOpBkpt;
+ bool callOut = false;
+ bool checkZero = false;
+ bool unary = false;
+ RegLocation rlResult;
+ bool shiftOp = false;
+ int funcOffset;
+ int retReg = rRET0;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::NEG_INT:
+ op = kOpNeg;
+ unary = true;
+ break;
+ case Instruction::NOT_INT:
+ op = kOpMvn;
+ unary = true;
+ break;
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ op = kOpAdd;
+ break;
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ op = kOpSub;
+ break;
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ op = kOpMul;
+ break;
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ checkZero = true;
+ op = kOpDiv;
+ callOut = true;
+ funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
+ retReg = rRET0;
+ break;
+ /* NOTE: returns in rARG1 */
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ checkZero = true;
+ op = kOpRem;
+ callOut = true;
+ funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
+ retReg = rRET1;
+ break;
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ op = kOpAnd;
+ break;
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ op = kOpOr;
+ break;
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ op = kOpXor;
+ break;
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ shiftOp = true;
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ shiftOp = true;
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ shiftOp = true;
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Invalid word arith op: " <<
+ (int)mir->dalvikInsn.opcode;
+ }
+ if (!callOut) {
+ if (unary) {
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg);
} else {
- RegLocation rlResult;
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- loadValueDirectFixed(cUnit, rlSrc2, rARG1);
+ if (shiftOp) {
#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, funcOffset);
-#endif
- loadValueDirectFixed(cUnit, rlSrc1, rARG0);
- if (checkZero) {
- genImmedCheck(cUnit, kCondEq, rARG1, 0, mir, kThrowDivZero);
- }
-#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
#else
- opThreadMem(cUnit, kOpBlx, funcOffset);
+ // X86 doesn't require masking and must use ECX
+ loadValueDirectFixed(cUnit, rlSrc2, rCX);
+ int tReg = rCX;
#endif
- if (retReg == rRET0)
- rlResult = oatGetReturn(cUnit, false);
- else
- rlResult = oatGetReturnAlt(cUnit);
- storeValue(cUnit, rlDest, rlResult);
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ } else {
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegReg(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ }
}
- return false;
+ storeValue(cUnit, rlDest, rlResult);
+ } else {
+ RegLocation rlResult;
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ loadValueDirectFixed(cUnit, rlSrc2, rARG1);
+#if !defined(TARGET_X86)
+ int rTgt = loadHelper(cUnit, funcOffset);
+#endif
+ loadValueDirectFixed(cUnit, rlSrc1, rARG0);
+ if (checkZero) {
+ genImmedCheck(cUnit, kCondEq, rARG1, 0, mir, kThrowDivZero);
+ }
+#if !defined(TARGET_X86)
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
+#else
+ opThreadMem(cUnit, kOpBlx, funcOffset);
+#endif
+ if (retReg == rRET0)
+ rlResult = oatGetReturn(cUnit, false);
+ else
+ rlResult = oatGetReturnAlt(cUnit);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ return false;
}
/*
@@ -1888,88 +1879,88 @@
bool isPowerOfTwo(int x)
{
- return (x & (x - 1)) == 0;
+ return (x & (x - 1)) == 0;
}
// Returns true if no more than two bits are set in 'x'.
bool isPopCountLE2(unsigned int x)
{
- x &= x - 1;
- return (x & (x - 1)) == 0;
+ x &= x - 1;
+ return (x & (x - 1)) == 0;
}
// Returns the index of the lowest set bit in 'x'.
int lowestSetBit(unsigned int x) {
- int bit_posn = 0;
- while ((x & 0xf) == 0) {
- bit_posn += 4;
- x >>= 4;
- }
- while ((x & 1) == 0) {
- bit_posn++;
- x >>= 1;
- }
- return bit_posn;
+ int bit_posn = 0;
+ while ((x & 0xf) == 0) {
+ bit_posn += 4;
+ x >>= 4;
+ }
+ while ((x & 1) == 0) {
+ bit_posn++;
+ x >>= 1;
+ }
+ return bit_posn;
}
// Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
// and store the result in 'rlDest'.
bool handleEasyDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+ RegLocation rlSrc, RegLocation rlDest, int lit)
{
#if defined(TARGET_ARM)
- // No divide instruction for Arm, so check for more special cases
- if (lit < 2) {
- return false;
- }
- if (!isPowerOfTwo(lit)) {
- return smallLiteralDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit);
- }
+ // No divide instruction for Arm, so check for more special cases
+ if (lit < 2) {
+ return false;
+ }
+ if (!isPowerOfTwo(lit)) {
+ return smallLiteralDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit);
+ }
#else
- if (lit < 2 || !isPowerOfTwo(lit)) {
- return false;
- }
+ if (lit < 2 || !isPowerOfTwo(lit)) {
+ return false;
+ }
#endif
- int k = lowestSetBit(lit);
- if (k >= 30) {
- // Avoid special cases.
- return false;
- }
- bool div = (dalvikOpcode == Instruction::DIV_INT_LIT8 ||
- dalvikOpcode == Instruction::DIV_INT_LIT16);
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- if (div) {
- int tReg = oatAllocTemp(cUnit);
- if (lit == 2) {
- // Division by 2 is by far the most common division by constant.
- opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
- opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
- opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
- } else {
- opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
- opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
- opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
- opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
- }
+ int k = lowestSetBit(lit);
+ if (k >= 30) {
+ // Avoid special cases.
+ return false;
+ }
+ bool div = (dalvikOpcode == Instruction::DIV_INT_LIT8 ||
+ dalvikOpcode == Instruction::DIV_INT_LIT16);
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ if (div) {
+ int tReg = oatAllocTemp(cUnit);
+ if (lit == 2) {
+ // Division by 2 is by far the most common division by constant.
+ opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
+ opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
+ opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
} else {
- int tReg1 = oatAllocTemp(cUnit);
- int tReg2 = oatAllocTemp(cUnit);
- if (lit == 2) {
- opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
- opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
- opRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit -1);
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
- } else {
- opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
- opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
- opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
- opRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit - 1);
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
- }
+ opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
+ opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
+ opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
+ opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
}
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ } else {
+ int tReg1 = oatAllocTemp(cUnit);
+ int tReg2 = oatAllocTemp(cUnit);
+ if (lit == 2) {
+ opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
+ opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
+ opRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit -1);
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+ } else {
+ opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
+ opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
+ opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
+ opRegRegImm(cUnit, kOpAnd, tReg2, tReg2, lit - 1);
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
+ }
+ }
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
}
void genMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
@@ -1977,17 +1968,17 @@
int firstBit, int secondBit)
{
#if defined(TARGET_ARM)
- opRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
- encodeShift(kArmLsl, secondBit - firstBit));
+ opRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
+ encodeShift(kArmLsl, secondBit - firstBit));
#else
- int tReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
- opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
- oatFreeTemp(cUnit, tReg);
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
+ oatFreeTemp(cUnit, tReg);
#endif
- if (firstBit != 0) {
- opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
- }
+ if (firstBit != 0) {
+ opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
+ }
}
// Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
@@ -1995,331 +1986,330 @@
bool handleEasyMultiply(CompilationUnit* cUnit, RegLocation rlSrc,
RegLocation rlDest, int lit)
{
- // Can we simplify this multiplication?
- bool powerOfTwo = false;
- bool popCountLE2 = false;
- bool powerOfTwoMinusOne = false;
- if (lit < 2) {
- // Avoid special cases.
- return false;
- } else if (isPowerOfTwo(lit)) {
- powerOfTwo = true;
- } else if (isPopCountLE2(lit)) {
- popCountLE2 = true;
- } else if (isPowerOfTwo(lit + 1)) {
- powerOfTwoMinusOne = true;
- } else {
- return false;
- }
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- if (powerOfTwo) {
- // Shift.
- opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
- lowestSetBit(lit));
- } else if (popCountLE2) {
- // Shift and add and shift.
- int firstBit = lowestSetBit(lit);
- int secondBit = lowestSetBit(lit ^ (1 << firstBit));
- genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
- firstBit, secondBit);
- } else {
- // Reverse subtract: (src << (shift + 1)) - src.
- DCHECK(powerOfTwoMinusOne);
- // TUNING: rsb dst, src, src lsl#lowestSetBit(lit + 1)
- int tReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
- }
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ // Can we simplify this multiplication?
+ bool powerOfTwo = false;
+ bool popCountLE2 = false;
+ bool powerOfTwoMinusOne = false;
+ if (lit < 2) {
+ // Avoid special cases.
+ return false;
+ } else if (isPowerOfTwo(lit)) {
+ powerOfTwo = true;
+ } else if (isPopCountLE2(lit)) {
+ popCountLE2 = true;
+ } else if (isPowerOfTwo(lit + 1)) {
+ powerOfTwoMinusOne = true;
+ } else {
+ return false;
+ }
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ if (powerOfTwo) {
+ // Shift.
+ opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
+ lowestSetBit(lit));
+ } else if (popCountLE2) {
+ // Shift and add and shift.
+ int firstBit = lowestSetBit(lit);
+ int secondBit = lowestSetBit(lit ^ (1 << firstBit));
+ genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
+ firstBit, secondBit);
+ } else {
+ // Reverse subtract: (src << (shift + 1)) - src.
+ DCHECK(powerOfTwoMinusOne);
+ // TUNING: rsb dst, src, src lsl#lowestSetBit(lit + 1)
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+ }
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
}
bool genArithOpIntLit(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc, int lit)
{
- Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
- RegLocation rlResult;
- OpKind op = (OpKind)0; /* Make gcc happy */
- int shiftOp = false;
- bool isDiv = false;
- int funcOffset;
+ Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
+ RegLocation rlResult;
+ OpKind op = (OpKind)0; /* Make gcc happy */
+ int shiftOp = false;
+ bool isDiv = false;
+ int funcOffset;
- switch (dalvikOpcode) {
- case Instruction::RSUB_INT_LIT8:
- case Instruction::RSUB_INT: {
- int tReg;
- //TUNING: add support for use of Arm rsub op
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- tReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tReg, lit);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
- tReg, rlSrc.lowReg);
- storeValue(cUnit, rlDest, rlResult);
- return false;
- break;
- }
-
- case Instruction::ADD_INT_LIT8:
- case Instruction::ADD_INT_LIT16:
- op = kOpAdd;
- break;
- case Instruction::MUL_INT_LIT8:
- case Instruction::MUL_INT_LIT16: {
- if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
- return false;
- }
- op = kOpMul;
- break;
- }
- case Instruction::AND_INT_LIT8:
- case Instruction::AND_INT_LIT16:
- op = kOpAnd;
- break;
- case Instruction::OR_INT_LIT8:
- case Instruction::OR_INT_LIT16:
- op = kOpOr;
- break;
- case Instruction::XOR_INT_LIT8:
- case Instruction::XOR_INT_LIT16:
- op = kOpXor;
- break;
- case Instruction::SHL_INT_LIT8:
- lit &= 31;
- shiftOp = true;
- op = kOpLsl;
- break;
- case Instruction::SHR_INT_LIT8:
- lit &= 31;
- shiftOp = true;
- op = kOpAsr;
- break;
- case Instruction::USHR_INT_LIT8:
- lit &= 31;
- shiftOp = true;
- op = kOpLsr;
- break;
-
- case Instruction::DIV_INT_LIT8:
- case Instruction::DIV_INT_LIT16:
- case Instruction::REM_INT_LIT8:
- case Instruction::REM_INT_LIT16:
- if (lit == 0) {
- genImmedCheck(cUnit, kCondAl, 0, 0, mir, kThrowDivZero);
- return false;
- }
- if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
- return false;
- }
- oatFlushAllRegs(cUnit); /* Everything to home location */
- loadValueDirectFixed(cUnit, rlSrc, rARG0);
- oatClobber(cUnit, rARG0);
- funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
- if ((dalvikOpcode == Instruction::DIV_INT_LIT8) ||
- (dalvikOpcode == Instruction::DIV_INT_LIT16)) {
- isDiv = true;
- } else {
- isDiv = false;
- }
- callRuntimeHelperRegImm(cUnit, funcOffset, rARG0, lit);
- if (isDiv)
- rlResult = oatGetReturn(cUnit, false);
- else
- rlResult = oatGetReturnAlt(cUnit);
- storeValue(cUnit, rlDest, rlResult);
- return false;
- break;
- default:
- return true;
+ switch (dalvikOpcode) {
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::RSUB_INT: {
+ int tReg;
+ //TUNING: add support for use of Arm rsub op
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, lit);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
+ break;
}
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- // Avoid shifts by literal 0 - no support in Thumb. Change to copy
- if (shiftOp && (lit == 0)) {
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- } else {
- opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
+
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::ADD_INT_LIT16:
+ op = kOpAdd;
+ break;
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::MUL_INT_LIT16: {
+ if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
+ return false;
+ }
+ op = kOpMul;
+ break;
}
- storeValue(cUnit, rlDest, rlResult);
- return false;
+ case Instruction::AND_INT_LIT8:
+ case Instruction::AND_INT_LIT16:
+ op = kOpAnd;
+ break;
+ case Instruction::OR_INT_LIT8:
+ case Instruction::OR_INT_LIT16:
+ op = kOpOr;
+ break;
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::XOR_INT_LIT16:
+ op = kOpXor;
+ break;
+ case Instruction::SHL_INT_LIT8:
+ lit &= 31;
+ shiftOp = true;
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_INT_LIT8:
+ lit &= 31;
+ shiftOp = true;
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_INT_LIT8:
+ lit &= 31;
+ shiftOp = true;
+ op = kOpLsr;
+ break;
+
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::REM_INT_LIT16:
+ if (lit == 0) {
+ genImmedCheck(cUnit, kCondAl, 0, 0, mir, kThrowDivZero);
+ return false;
+ }
+ if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
+ return false;
+ }
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ loadValueDirectFixed(cUnit, rlSrc, rARG0);
+ oatClobber(cUnit, rARG0);
+ funcOffset = ENTRYPOINT_OFFSET(pIdivmod);
+ if ((dalvikOpcode == Instruction::DIV_INT_LIT8) ||
+ (dalvikOpcode == Instruction::DIV_INT_LIT16)) {
+ isDiv = true;
+ } else {
+ isDiv = false;
+ }
+ callRuntimeHelperRegImm(cUnit, funcOffset, rARG0, lit);
+ if (isDiv)
+ rlResult = oatGetReturn(cUnit, false);
+ else
+ rlResult = oatGetReturnAlt(cUnit);
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
+ break;
+ default:
+ return true;
+ }
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ // Avoid shifts by a literal 0 (unsupported in Thumb); emit a register copy instead
+ if (shiftOp && (lit == 0)) {
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ } else {
+ opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
+ }
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
}
bool genArithOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- RegLocation rlResult;
- OpKind firstOp = kOpBkpt;
- OpKind secondOp = kOpBkpt;
- bool callOut = false;
- bool checkZero = false;
- int funcOffset;
- int retReg = rRET0;
+ RegLocation rlResult;
+ OpKind firstOp = kOpBkpt;
+ OpKind secondOp = kOpBkpt;
+ bool callOut = false;
+ bool checkZero = false;
+ int funcOffset;
+ int retReg = rRET0;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::NOT_LONG:
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- // Check for destructive overlap
- if (rlResult.lowReg == rlSrc2.highReg) {
- int tReg = oatAllocTemp(cUnit);
- opRegCopy(cUnit, tReg, rlSrc2.highReg);
- opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
- opRegReg(cUnit, kOpMvn, rlResult.highReg, tReg);
- oatFreeTemp(cUnit, tReg);
- } else {
- opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
- opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
- }
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
- break;
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::NOT_LONG:
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ // Check for destructive overlap
+ if (rlResult.lowReg == rlSrc2.highReg) {
+ int tReg = oatAllocTemp(cUnit);
+ opRegCopy(cUnit, tReg, rlSrc2.highReg);
+ opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
+ opRegReg(cUnit, kOpMvn, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ } else {
+ opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
+ opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
+ }
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+ break;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
#if defined(TARGET_MIPS) || defined(TARGET_X86)
- return genAddLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genAddLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#else
- firstOp = kOpAdd;
- secondOp = kOpAdc;
- break;
+ firstOp = kOpAdd;
+ secondOp = kOpAdc;
+ break;
#endif
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
#if defined(TARGET_MIPS) || defined(TARGET_X86)
- return genSubLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genSubLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#else
- firstOp = kOpSub;
- secondOp = kOpSbc;
- break;
+ firstOp = kOpSub;
+ secondOp = kOpSbc;
+ break;
#endif
- case Instruction::MUL_LONG:
- case Instruction::MUL_LONG_2ADDR:
- callOut = true;
- retReg = rRET0;
- funcOffset = ENTRYPOINT_OFFSET(pLmul);
- break;
- case Instruction::DIV_LONG:
- case Instruction::DIV_LONG_2ADDR:
- callOut = true;
- checkZero = true;
- retReg = rRET0;
- funcOffset = ENTRYPOINT_OFFSET(pLdivmod);
- break;
- case Instruction::REM_LONG:
- case Instruction::REM_LONG_2ADDR:
- callOut = true;
- checkZero = true;
- funcOffset = ENTRYPOINT_OFFSET(pLdiv);
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ callOut = true;
+ retReg = rRET0;
+ funcOffset = ENTRYPOINT_OFFSET(pLmul);
+ break;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ callOut = true;
+ checkZero = true;
+ retReg = rRET0;
+ funcOffset = ENTRYPOINT_OFFSET(pLdivmod);
+ break;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ callOut = true;
+ checkZero = true;
+ funcOffset = ENTRYPOINT_OFFSET(pLdiv);
#if defined(TARGET_ARM)
- /* NOTE - result is in rARG2/rARG3 instead of rRET0/rRET1 */
- retReg = rARG2;
+ /* NOTE - result is in rARG2/rARG3 instead of rRET0/rRET1 */
+ retReg = rARG2;
#else
- retReg = rRET0;
+ retReg = rRET0;
#endif
- break;
- case Instruction::AND_LONG_2ADDR:
- case Instruction::AND_LONG:
+ break;
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_LONG:
#if defined(TARGET_X86)
- return genAndLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genAndLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#else
- firstOp = kOpAnd;
- secondOp = kOpAnd;
- break;
+ firstOp = kOpAnd;
+ secondOp = kOpAnd;
+ break;
#endif
- case Instruction::OR_LONG:
- case Instruction::OR_LONG_2ADDR:
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
#if defined(TARGET_X86)
- return genOrLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genOrLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#else
- firstOp = kOpOr;
- secondOp = kOpOr;
- break;
+ firstOp = kOpOr;
+ secondOp = kOpOr;
+ break;
#endif
- case Instruction::XOR_LONG:
- case Instruction::XOR_LONG_2ADDR:
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
#if defined(TARGET_X86)
- return genXorLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genXorLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#else
- firstOp = kOpXor;
- secondOp = kOpXor;
- break;
+ firstOp = kOpXor;
+ secondOp = kOpXor;
+ break;
#endif
- case Instruction::NEG_LONG: {
- return genNegLong(cUnit, mir, rlDest, rlSrc2);
- }
- default:
- LOG(FATAL) << "Invalid long arith op";
+ case Instruction::NEG_LONG: {
+ return genNegLong(cUnit, mir, rlDest, rlSrc2);
}
- if (!callOut) {
- genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ }
+ if (!callOut) {
+ genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
+ } else {
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ if (checkZero) {
+ loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
+#if !defined(TARGET_X86)
+ int rTgt = loadHelper(cUnit, funcOffset);
+#endif
+ int tReg = oatAllocTemp(cUnit);
+#if defined(TARGET_ARM)
+ newLIR4(cUnit, kThumb2OrrRRRs, tReg, rARG2, rARG3, 0);
+ oatFreeTemp(cUnit, tReg);
+ genCheck(cUnit, kCondEq, mir, kThrowDivZero);
+#else
+ opRegRegReg(cUnit, kOpOr, tReg, rARG2, rARG3);
+#endif
+ genImmedCheck(cUnit, kCondEq, tReg, 0, mir, kThrowDivZero);
+ oatFreeTemp(cUnit, tReg);
+ loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
+#if !defined(TARGET_X86)
+ opReg(cUnit, kOpBlx, rTgt);
+ oatFreeTemp(cUnit, rTgt);
+#else
+ opThreadMem(cUnit, kOpBlx, funcOffset);
+#endif
} else {
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- if (checkZero) {
- loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
-#if !defined(TARGET_X86)
- int rTgt = loadHelper(cUnit, funcOffset);
-#endif
- int tReg = oatAllocTemp(cUnit);
-#if defined(TARGET_ARM)
- newLIR4(cUnit, kThumb2OrrRRRs, tReg, rARG2, rARG3, 0);
- oatFreeTemp(cUnit, tReg);
- genCheck(cUnit, kCondEq, mir, kThrowDivZero);
-#else
- opRegRegReg(cUnit, kOpOr, tReg, rARG2, rARG3);
-#endif
- genImmedCheck(cUnit, kCondEq, tReg, 0, mir, kThrowDivZero);
- oatFreeTemp(cUnit, tReg);
- loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
-#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rTgt);
- oatFreeTemp(cUnit, rTgt);
-#else
- opThreadMem(cUnit, kOpBlx, funcOffset);
-#endif
- } else {
- callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset,
- rlSrc1, rlSrc2);
- }
- // Adjust return regs in to handle case of rem returning rARG2/rARG3
- if (retReg == rRET0)
- rlResult = oatGetReturnWide(cUnit, false);
- else
- rlResult = oatGetReturnWideAlt(cUnit);
- storeValueWide(cUnit, rlDest, rlResult);
+ callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset,
+ rlSrc1, rlSrc2);
}
- return false;
+ // Adjust return regs to handle the case of rem returning in rARG2/rARG3
+ if (retReg == rRET0)
+ rlResult = oatGetReturnWide(cUnit, false);
+ else
+ rlResult = oatGetReturnWideAlt(cUnit);
+ storeValueWide(cUnit, rlDest, rlResult);
+ }
+ return false;
}
bool genConversionCall(CompilationUnit* cUnit, MIR* mir, int funcOffset,
- int srcSize, int tgtSize)
+ int srcSize, int tgtSize)
{
- /*
- * Don't optimize the register usage since it calls out to support
- * functions
- */
- RegLocation rlSrc;
- RegLocation rlDest;
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- if (srcSize == 1) {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- loadValueDirectFixed(cUnit, rlSrc, rARG0);
- } else {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- loadValueDirectWideFixed(cUnit, rlSrc, rARG0, rARG1);
- }
- callRuntimeHelperRegLocation(cUnit, funcOffset, rlSrc);
- if (tgtSize == 1) {
- RegLocation rlResult;
- rlDest = oatGetDest(cUnit, mir, 0);
- rlResult = oatGetReturn(cUnit, rlDest.fp);
- storeValue(cUnit, rlDest, rlResult);
- } else {
- RegLocation rlResult;
- rlDest = oatGetDestWide(cUnit, mir, 0, 1);
- rlResult = oatGetReturnWide(cUnit, rlDest.fp);
- storeValueWide(cUnit, rlDest, rlResult);
- }
- return false;
+ /*
+ * Don't optimize the register usage since it calls out to support
+ * functions
+ */
+ RegLocation rlSrc;
+ RegLocation rlDest;
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ if (srcSize == 1) {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0);
+ } else {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ loadValueDirectWideFixed(cUnit, rlSrc, rARG0, rARG1);
+ }
+ callRuntimeHelperRegLocation(cUnit, funcOffset, rlSrc);
+ if (tgtSize == 1) {
+ RegLocation rlResult;
+ rlDest = oatGetDest(cUnit, mir, 0);
+ rlResult = oatGetReturn(cUnit, rlDest.fp);
+ storeValue(cUnit, rlDest, rlResult);
+ } else {
+ RegLocation rlResult;
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ rlResult = oatGetReturnWide(cUnit, rlDest.fp);
+ storeValueWide(cUnit, rlDest, rlResult);
+ }
+ return false;
}
void genNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
@@ -2327,42 +2317,42 @@
RegLocation rlDest, RegLocation rlSrc1,
RegLocation rlSrc2)
{
- RegLocation rlResult;
- int funcOffset;
+ RegLocation rlResult;
+ int funcOffset;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFadd);
- break;
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFsub);
- break;
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFdiv);
- break;
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFmul);
- break;
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT:
- funcOffset = ENTRYPOINT_OFFSET(pFmodf);
- break;
- case Instruction::NEG_FLOAT: {
- genNegFloat(cUnit, rlDest, rlSrc1);
- return false;
- }
- default:
- return true;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ funcOffset = ENTRYPOINT_OFFSET(pFadd);
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ funcOffset = ENTRYPOINT_OFFSET(pFsub);
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ funcOffset = ENTRYPOINT_OFFSET(pFdiv);
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ funcOffset = ENTRYPOINT_OFFSET(pFmul);
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ funcOffset = ENTRYPOINT_OFFSET(pFmodf);
+ break;
+ case Instruction::NEG_FLOAT: {
+ genNegFloat(cUnit, rlDest, rlSrc1);
+ return false;
}
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2);
- rlResult = oatGetReturn(cUnit, true);
- storeValue(cUnit, rlDest, rlResult);
- return false;
+ default:
+ return true;
+ }
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2);
+ rlResult = oatGetReturn(cUnit, true);
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
}
void genNegDouble(CompilationUnit* cUnit, RegLocation rlDst, RegLocation rlSrc);
@@ -2370,83 +2360,83 @@
RegLocation rlDest, RegLocation rlSrc1,
RegLocation rlSrc2)
{
- RegLocation rlResult;
- int funcOffset;
+ RegLocation rlResult;
+ int funcOffset;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::ADD_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDadd);
- break;
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDsub);
- break;
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDdiv);
- break;
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pDmul);
- break;
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE:
- funcOffset = ENTRYPOINT_OFFSET(pFmod);
- break;
- case Instruction::NEG_DOUBLE: {
- genNegDouble(cUnit, rlDest, rlSrc1);
- return false;
- }
- default:
- return true;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ funcOffset = ENTRYPOINT_OFFSET(pDadd);
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ funcOffset = ENTRYPOINT_OFFSET(pDsub);
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ funcOffset = ENTRYPOINT_OFFSET(pDdiv);
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ funcOffset = ENTRYPOINT_OFFSET(pDmul);
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ funcOffset = ENTRYPOINT_OFFSET(pFmod);
+ break;
+ case Instruction::NEG_DOUBLE: {
+ genNegDouble(cUnit, rlDest, rlSrc1);
+ return false;
}
- oatFlushAllRegs(cUnit); /* Send everything to home location */
- callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2);
- rlResult = oatGetReturnWide(cUnit, true);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ default:
+ return true;
+ }
+ oatFlushAllRegs(cUnit); /* Send everything to home location */
+ callRuntimeHelperRegLocationRegLocation(cUnit, funcOffset, rlSrc1, rlSrc2);
+ rlResult = oatGetReturnWide(cUnit, true);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
bool genConversionPortable(CompilationUnit* cUnit, MIR* mir)
{
- Instruction::Code opcode = mir->dalvikInsn.opcode;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::INT_TO_FLOAT:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pI2f),
- 1, 1);
- case Instruction::FLOAT_TO_INT:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2iz),
- 1, 1);
- case Instruction::DOUBLE_TO_FLOAT:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2f),
- 2, 1);
- case Instruction::FLOAT_TO_DOUBLE:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2d),
- 1, 2);
- case Instruction::INT_TO_DOUBLE:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pI2d),
- 1, 2);
- case Instruction::DOUBLE_TO_INT:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2iz),
- 2, 1);
- case Instruction::FLOAT_TO_LONG:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2l),
- 1, 2);
- case Instruction::LONG_TO_FLOAT:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pL2f),
- 2, 1);
- case Instruction::DOUBLE_TO_LONG:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2l),
- 2, 2);
- case Instruction::LONG_TO_DOUBLE:
- return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pL2d),
- 2, 2);
- default:
- return true;
- }
- return false;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pI2f),
+ 1, 1);
+ case Instruction::FLOAT_TO_INT:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2iz),
+ 1, 1);
+ case Instruction::DOUBLE_TO_FLOAT:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2f),
+ 2, 1);
+ case Instruction::FLOAT_TO_DOUBLE:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2d),
+ 1, 2);
+ case Instruction::INT_TO_DOUBLE:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pI2d),
+ 1, 2);
+ case Instruction::DOUBLE_TO_INT:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2iz),
+ 2, 1);
+ case Instruction::FLOAT_TO_LONG:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pF2l),
+ 1, 2);
+ case Instruction::LONG_TO_FLOAT:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pL2f),
+ 2, 1);
+ case Instruction::DOUBLE_TO_LONG:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pD2l),
+ 2, 2);
+ case Instruction::LONG_TO_DOUBLE:
+ return genConversionCall(cUnit, mir, ENTRYPOINT_OFFSET(pL2d),
+ 2, 2);
+ default:
+ return true;
+ }
+ return false;
}
/*
@@ -2460,95 +2450,96 @@
*/
void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset)
{
- // Following DCHECK verifies that dPC is in range of single load immediate
- DCHECK((offset == DEBUGGER_METHOD_ENTRY) ||
- (offset == DEBUGGER_METHOD_EXIT) || ((offset & 0xffff) == offset));
- oatClobberCalleeSave(cUnit);
+ // Following DCHECK verifies that dPC is in range of single load immediate
+ DCHECK((offset == DEBUGGER_METHOD_ENTRY) ||
+ (offset == DEBUGGER_METHOD_EXIT) || ((offset & 0xffff) == offset));
+ oatClobberCalleeSave(cUnit);
#if defined(TARGET_ARM)
- opRegImm(cUnit, kOpCmp, rSUSPEND, 0);
- opIT(cUnit, kArmCondNe, "T");
- loadConstant(cUnit, rARG2, offset); // arg2 <- Entry code
- opReg(cUnit, kOpBlx, rSUSPEND);
+ opRegImm(cUnit, kOpCmp, rSUSPEND, 0);
+ opIT(cUnit, kArmCondNe, "T");
+ loadConstant(cUnit, rARG2, offset); // arg2 <- Entry code
+ opReg(cUnit, kOpBlx, rSUSPEND);
#elif defined(TARGET_X86)
- UNIMPLEMENTED(FATAL);
+ UNIMPLEMENTED(FATAL);
#else
- LIR* branch = opCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0, NULL);
- loadConstant(cUnit, rARG2, offset);
- opReg(cUnit, kOpBlx, rSUSPEND);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branch->target = (LIR*)target;
+ LIR* branch = opCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0, NULL);
+ loadConstant(cUnit, rARG2, offset);
+ opReg(cUnit, kOpBlx, rSUSPEND);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branch->target = (LIR*)target;
#endif
- oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG2);
}
/* Check if we need to check for pending suspend request */
void genSuspendTest(CompilationUnit* cUnit, MIR* mir)
{
- if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
- return;
- }
- oatFlushAllRegs(cUnit);
- if (cUnit->genDebugger) {
- // If generating code for the debugger, always check for suspension
+ if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ oatFlushAllRegs(cUnit);
+ if (cUnit->genDebugger) {
+ // If generating code for the debugger, always check for suspension
#if defined(TARGET_X86)
- UNIMPLEMENTED(FATAL);
+ UNIMPLEMENTED(FATAL);
#else
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pTestSuspendFromCode));
- opReg(cUnit, kOpBlx, rTgt);
- // Refresh rSUSPEND
- loadWordDisp(cUnit, rSELF,
- ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode),
- rSUSPEND);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pTestSuspendFromCode));
+ opReg(cUnit, kOpBlx, rTgt);
+ // Refresh rSUSPEND
+ loadWordDisp(cUnit, rSELF,
+ ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode),
+ rSUSPEND);
#endif
- } else {
- LIR* branch = NULL;
+ } else {
+ LIR* branch = NULL;
#if defined(TARGET_ARM)
- // In non-debug case, only check periodically
- newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
- branch = opCondBranch(cUnit, kCondEq, NULL);
+ // In non-debug case, only check periodically
+ newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
+ branch = opCondBranch(cUnit, kCondEq, NULL);
#elif defined(TARGET_X86)
- newLIR2(cUnit, kX86Cmp32TI8, Thread::SuspendCountOffset().Int32Value(), 0);
- branch = opCondBranch(cUnit, kCondNe, NULL);
+ newLIR2(cUnit, kX86Cmp32TI8, Thread::SuspendCountOffset().Int32Value(), 0);
+ branch = opCondBranch(cUnit, kCondNe, NULL);
#else
- opRegImm(cUnit, kOpSub, rSUSPEND, 1);
- branch = opCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0, NULL);
+ opRegImm(cUnit, kOpSub, rSUSPEND, 1);
+ branch = opCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0, NULL);
#endif
- LIR* retLab = newLIR0(cUnit, kPseudoTargetLabel);
- LIR* target = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kPseudoSuspendTarget, (intptr_t)retLab, mir->offset);
- branch->target = (LIR*)target;
- oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)target);
- }
+ LIR* retLab = newLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kPseudoSuspendTarget, (intptr_t)retLab, mir->offset);
+ branch->target = (LIR*)target;
+ oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)target);
+ }
}
/* Check if we need to check for pending suspend request */
void genSuspendTestAndBranch(CompilationUnit* cUnit, MIR* mir, LIR* target)
{
- if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
- opUnconditionalBranch(cUnit, target);
- return;
- }
- if (cUnit->genDebugger) {
- genSuspendTest(cUnit, mir);
- opUnconditionalBranch(cUnit, target);
- } else {
+ if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
+ opUnconditionalBranch(cUnit, target);
+ return;
+ }
+ if (cUnit->genDebugger) {
+ genSuspendTest(cUnit, mir);
+ opUnconditionalBranch(cUnit, target);
+ } else {
#if defined(TARGET_ARM)
- // In non-debug case, only check periodically
- newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
- opCondBranch(cUnit, kCondNe, target);
+ // In non-debug case, only check periodically
+ newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
+ opCondBranch(cUnit, kCondNe, target);
#elif defined(TARGET_X86)
- newLIR2(cUnit, kX86Cmp32TI8, Thread::SuspendCountOffset().Int32Value(), 0);
- opCondBranch(cUnit, kCondEq, target);
+ newLIR2(cUnit, kX86Cmp32TI8, Thread::SuspendCountOffset().Int32Value(), 0);
+ opCondBranch(cUnit, kCondEq, target);
#else
- opRegImm(cUnit, kOpSub, rSUSPEND, 1);
- opCmpImmBranch(cUnit, kCondNe, rSUSPEND, 0, target);
+ opRegImm(cUnit, kOpSub, rSUSPEND, 1);
+ opCmpImmBranch(cUnit, kCondNe, rSUSPEND, 0, target);
#endif
- LIR* launchPad = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kPseudoSuspendTarget, (intptr_t)target, mir->offset);
- oatFlushAllRegs(cUnit);
- opUnconditionalBranch(cUnit, launchPad);
- oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)launchPad);
- }
+ LIR* launchPad = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kPseudoSuspendTarget, (intptr_t)target, mir->offset);
+ oatFlushAllRegs(cUnit);
+ opUnconditionalBranch(cUnit, launchPad);
+ oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads,
+ (intptr_t)launchPad);
+ }
}
} // namespace art
diff --git a/src/compiler/codegen/GenInvoke.cc b/src/compiler/codegen/GenInvoke.cc
index b986e78..6d3c240 100644
--- a/src/compiler/codegen/GenInvoke.cc
+++ b/src/compiler/codegen/GenInvoke.cc
@@ -36,97 +36,97 @@
*/
void flushIns(CompilationUnit* cUnit)
{
- /*
- * Dummy up a RegLocation for the incoming Method*
- * It will attempt to keep rARG0 live (or copy it to home location
- * if promoted).
- */
- RegLocation rlSrc = cUnit->regLocation[cUnit->methodSReg];
- RegLocation rlMethod = cUnit->regLocation[cUnit->methodSReg];
- rlSrc.location = kLocPhysReg;
- rlSrc.lowReg = rARG0;
- rlSrc.home = false;
- oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
- storeValue(cUnit, rlMethod, rlSrc);
- // If Method* has been promoted, explicitly flush
- if (rlMethod.location == kLocPhysReg) {
- storeWordDisp(cUnit, rSP, 0, rARG0);
- }
+ /*
+ * Dummy up a RegLocation for the incoming Method*
+ * It will attempt to keep rARG0 live (or copy it to home location
+ * if promoted).
+ */
+ RegLocation rlSrc = cUnit->regLocation[cUnit->methodSReg];
+ RegLocation rlMethod = cUnit->regLocation[cUnit->methodSReg];
+ rlSrc.location = kLocPhysReg;
+ rlSrc.lowReg = rARG0;
+ rlSrc.home = false;
+ oatMarkLive(cUnit, rlSrc.lowReg, rlSrc.sRegLow);
+ storeValue(cUnit, rlMethod, rlSrc);
+ // If Method* has been promoted, explicitly flush
+ if (rlMethod.location == kLocPhysReg) {
+ storeWordDisp(cUnit, rSP, 0, rARG0);
+ }
- if (cUnit->numIns == 0)
- return;
- const int numArgRegs = 3;
- static int argRegs[] = {rARG1, rARG2, rARG3};
- int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
- /*
- * Copy incoming arguments to their proper home locations.
- * NOTE: an older version of dx had an issue in which
- * it would reuse static method argument registers.
- * This could result in the same Dalvik virtual register
- * being promoted to both core and fp regs. To account for this,
- * we only copy to the corresponding promoted physical register
- * if it matches the type of the SSA name for the incoming
- * argument. It is also possible that long and double arguments
- * end up half-promoted. In those cases, we must flush the promoted
- * half to memory as well.
- */
- for (int i = 0; i < cUnit->numIns; i++) {
- PromotionMap* vMap = &cUnit->promotionMap[startVReg + i];
- if (i < numArgRegs) {
- // If arriving in register
- bool needFlush = true;
- RegLocation* tLoc = &cUnit->regLocation[startVReg + i];
- if ((vMap->coreLocation == kLocPhysReg) && !tLoc->fp) {
- opRegCopy(cUnit, vMap->coreReg, argRegs[i]);
- needFlush = false;
- } else if ((vMap->fpLocation == kLocPhysReg) && tLoc->fp) {
- opRegCopy(cUnit, vMap->fpReg, argRegs[i]);
- needFlush = false;
- } else {
- needFlush = true;
- }
+ if (cUnit->numIns == 0)
+ return;
+ const int numArgRegs = 3;
+ static int argRegs[] = {rARG1, rARG2, rARG3};
+ int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
+ /*
+ * Copy incoming arguments to their proper home locations.
+ * NOTE: an older version of dx had an issue in which
+ * it would reuse static method argument registers.
+ * This could result in the same Dalvik virtual register
+ * being promoted to both core and fp regs. To account for this,
+ * we only copy to the corresponding promoted physical register
+ * if it matches the type of the SSA name for the incoming
+ * argument. It is also possible that long and double arguments
+ * end up half-promoted. In those cases, we must flush the promoted
+ * half to memory as well.
+ */
+ for (int i = 0; i < cUnit->numIns; i++) {
+ PromotionMap* vMap = &cUnit->promotionMap[startVReg + i];
+ if (i < numArgRegs) {
+ // If arriving in register
+ bool needFlush = true;
+ RegLocation* tLoc = &cUnit->regLocation[startVReg + i];
+ if ((vMap->coreLocation == kLocPhysReg) && !tLoc->fp) {
+ opRegCopy(cUnit, vMap->coreReg, argRegs[i]);
+ needFlush = false;
+ } else if ((vMap->fpLocation == kLocPhysReg) && tLoc->fp) {
+ opRegCopy(cUnit, vMap->fpReg, argRegs[i]);
+ needFlush = false;
+ } else {
+ needFlush = true;
+ }
- // For wide args, force flush if only half is promoted
- if (tLoc->wide) {
- PromotionMap* pMap = vMap + (tLoc->highWord ? -1 : +1);
- needFlush |= (pMap->coreLocation != vMap->coreLocation) ||
- (pMap->fpLocation != vMap->fpLocation);
- }
- if (needFlush) {
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- argRegs[i], kWord);
- }
- } else {
- // If arriving in frame & promoted
- if (vMap->coreLocation == kLocPhysReg) {
- loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- vMap->coreReg);
- }
- if (vMap->fpLocation == kLocPhysReg) {
- loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- vMap->fpReg);
- }
- }
+ // For wide args, force flush if only half is promoted
+ if (tLoc->wide) {
+ PromotionMap* pMap = vMap + (tLoc->highWord ? -1 : +1);
+ needFlush |= (pMap->coreLocation != vMap->coreLocation) ||
+ (pMap->fpLocation != vMap->fpLocation);
+ }
+ if (needFlush) {
+ storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+ argRegs[i], kWord);
+ }
+ } else {
+ // If arriving in frame & promoted
+ if (vMap->coreLocation == kLocPhysReg) {
+ loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+ vMap->coreReg);
+ }
+ if (vMap->fpLocation == kLocPhysReg) {
+ loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
+ vMap->fpReg);
+ }
}
+ }
}
void scanMethodLiteralPool(CompilationUnit* cUnit, LIR** methodTarget, LIR** codeTarget, const DexFile* dexFile, uint32_t dexMethodIdx)
{
- LIR* curTarget = cUnit->methodLiteralList;
- LIR* nextTarget = curTarget != NULL ? curTarget->next : NULL;
- while (curTarget != NULL && nextTarget != NULL) {
- if (curTarget->operands[0] == (int)dexFile &&
- nextTarget->operands[0] == (int)dexMethodIdx) {
- *codeTarget = curTarget;
- *methodTarget = nextTarget;
- DCHECK((*codeTarget)->next == *methodTarget);
- DCHECK_EQ((*codeTarget)->operands[0], (int)dexFile);
- DCHECK_EQ((*methodTarget)->operands[0], (int)dexMethodIdx);
- break;
- }
- curTarget = nextTarget->next;
- nextTarget = curTarget != NULL ? curTarget->next : NULL;
+ LIR* curTarget = cUnit->methodLiteralList;
+ LIR* nextTarget = curTarget != NULL ? curTarget->next : NULL;
+ while (curTarget != NULL && nextTarget != NULL) {
+ if (curTarget->operands[0] == (int)dexFile &&
+ nextTarget->operands[0] == (int)dexMethodIdx) {
+ *codeTarget = curTarget;
+ *methodTarget = nextTarget;
+ DCHECK((*codeTarget)->next == *methodTarget);
+ DCHECK_EQ((*codeTarget)->operands[0], (int)dexFile);
+ DCHECK_EQ((*methodTarget)->operands[0], (int)dexMethodIdx);
+ break;
}
+ curTarget = nextTarget->next;
+ nextTarget = curTarget != NULL ? curTarget->next : NULL;
+ }
}
/*
@@ -139,96 +139,99 @@
InvokeType type)
{
#if !defined(TARGET_ARM)
- directCode = 0;
- directMethod = 0;
+ directCode = 0;
+ directMethod = 0;
#endif
- if (directCode != 0 && directMethod != 0) {
- switch (state) {
- case 0: // Get the current Method* [sets rARG0]
- if (directCode != (uintptr_t)-1) {
- loadConstant(cUnit, rINVOKE_TGT, directCode);
- } else {
- LIR* dataTarget = scanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
- dataTarget->operands[1] = type;
- }
-#if defined(TARGET_ARM)
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rINVOKE_TGT, 0, 0, 0, 0, dataTarget);
- oatAppendLIR(cUnit, loadPcRel);
-#else
- UNIMPLEMENTED(FATAL) << (void*)dataTarget;
-#endif
- }
- if (directMethod != (uintptr_t)-1) {
- loadConstant(cUnit, rARG0, directMethod);
- } else {
- LIR* dataTarget = scanLiteralPool(cUnit->methodLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, &cUnit->methodLiteralList, dexIdx);
- dataTarget->operands[1] = type;
- }
-#if defined(TARGET_ARM)
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rARG0, 0, 0, 0, 0, dataTarget);
- oatAppendLIR(cUnit, loadPcRel);
-#else
- UNIMPLEMENTED(FATAL) << (void*)dataTarget;
-#endif
- }
- break;
- default:
- return -1;
- }
- } else {
- switch (state) {
- case 0: // Get the current Method* [sets rARG0]
- // TUNING: we can save a reg copy if Method* has been promoted
- loadCurrMethodDirect(cUnit, rARG0);
- break;
- case 1: // Get method->dex_cache_resolved_methods_
- loadWordDisp(cUnit, rARG0,
- Method::DexCacheResolvedMethodsOffset().Int32Value(),
- rARG0);
- // Set up direct code if known.
- if (directCode != 0) {
- if (directCode != (uintptr_t)-1) {
- loadConstant(cUnit, rINVOKE_TGT, directCode);
- } else {
- LIR* dataTarget = scanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
- if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
- dataTarget->operands[1] = type;
- }
-#if defined(TARGET_ARM)
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rINVOKE_TGT, 0, 0, 0, 0, dataTarget);
- oatAppendLIR(cUnit, loadPcRel);
-#else
- UNIMPLEMENTED(FATAL) << (void*)dataTarget;
-#endif
- }
- }
- break;
- case 2: // Grab target method*
- loadWordDisp(cUnit, rARG0,
- Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
- rARG0);
- break;
-#if !defined(TARGET_X86)
- case 3: // Grab the code from the method*
- if (directCode == 0) {
- loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
- rINVOKE_TGT);
- }
- break;
-#endif
- default:
- return -1;
+ if (directCode != 0 && directMethod != 0) {
+ switch (state) {
+ case 0: // Get the current Method* [sets rARG0]
+ if (directCode != (uintptr_t)-1) {
+ loadConstant(cUnit, rINVOKE_TGT, directCode);
+ } else {
+ LIR* dataTarget = scanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
+ if (dataTarget == NULL) {
+ dataTarget = addWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
+ dataTarget->operands[1] = type;
}
+#if defined(TARGET_ARM)
+ LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kThumb2LdrPcRel12, rINVOKE_TGT, 0, 0, 0, 0,
+ dataTarget);
+ oatAppendLIR(cUnit, loadPcRel);
+#else
+ UNIMPLEMENTED(FATAL) << (void*)dataTarget;
+#endif
+ }
+ if (directMethod != (uintptr_t)-1) {
+ loadConstant(cUnit, rARG0, directMethod);
+ } else {
+ LIR* dataTarget = scanLiteralPool(cUnit->methodLiteralList, dexIdx, 0);
+ if (dataTarget == NULL) {
+ dataTarget = addWordData(cUnit, &cUnit->methodLiteralList, dexIdx);
+ dataTarget->operands[1] = type;
+ }
+#if defined(TARGET_ARM)
+ LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kThumb2LdrPcRel12, rARG0, 0, 0, 0, 0,
+ dataTarget);
+ oatAppendLIR(cUnit, loadPcRel);
+#else
+ UNIMPLEMENTED(FATAL) << (void*)dataTarget;
+#endif
+ }
+ break;
+ default:
+ return -1;
}
- return state + 1;
+ } else {
+ switch (state) {
+ case 0: // Get the current Method* [sets rARG0]
+ // TUNING: we can save a reg copy if Method* has been promoted
+ loadCurrMethodDirect(cUnit, rARG0);
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_
+ loadWordDisp(cUnit, rARG0,
+ Method::DexCacheResolvedMethodsOffset().Int32Value(),
+ rARG0);
+ // Set up direct code if known.
+ if (directCode != 0) {
+ if (directCode != (uintptr_t)-1) {
+ loadConstant(cUnit, rINVOKE_TGT, directCode);
+ } else {
+ LIR* dataTarget = scanLiteralPool(cUnit->codeLiteralList, dexIdx, 0);
+ if (dataTarget == NULL) {
+ dataTarget = addWordData(cUnit, &cUnit->codeLiteralList, dexIdx);
+ dataTarget->operands[1] = type;
+ }
+#if defined(TARGET_ARM)
+ LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kThumb2LdrPcRel12, rINVOKE_TGT, 0, 0, 0, 0,
+ dataTarget);
+ oatAppendLIR(cUnit, loadPcRel);
+#else
+ UNIMPLEMENTED(FATAL) << (void*)dataTarget;
+#endif
+ }
+ }
+ break;
+ case 2: // Grab target method*
+ loadWordDisp(cUnit, rARG0,
+ Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
+ rARG0);
+ break;
+#if !defined(TARGET_X86)
+ case 3: // Grab the code from the method*
+ if (directCode == 0) {
+ loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
+ rINVOKE_TGT);
+ }
+ break;
+#endif
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
}
/*
@@ -242,60 +245,59 @@
int state, uint32_t dexIdx, uint32_t methodIdx,
uintptr_t unused, uintptr_t unused2, InvokeType unused3)
{
- RegLocation rlArg;
- /*
- * This is the fast path in which the target virtual method is
- * fully resolved at compile time.
- */
- switch (state) {
- case 0: // Get "this" [set rARG1]
- rlArg = oatGetSrc(cUnit, mir, 0);
- loadValueDirectFixed(cUnit, rlArg, rARG1);
- break;
- case 1: // Is "this" null? [use rARG1]
- genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
- // get this->klass_ [use rARG1, set rINVOKE_TGT]
- loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(),
- rINVOKE_TGT);
- break;
- case 2: // Get this->klass_->vtable [usr rINVOKE_TGT, set rINVOKE_TGT]
- loadWordDisp(cUnit, rINVOKE_TGT, Class::VTableOffset().Int32Value(),
- rINVOKE_TGT);
- break;
- case 3: // Get target method [use rINVOKE_TGT, set rARG0]
- loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
- Array::DataOffset(sizeof(Object*)).Int32Value(),
- rARG0);
- break;
+ RegLocation rlArg;
+ /*
+ * This is the fast path in which the target virtual method is
+ * fully resolved at compile time.
+ */
+ switch (state) {
+ case 0: // Get "this" [set rARG1]
+ rlArg = oatGetSrc(cUnit, mir, 0);
+ loadValueDirectFixed(cUnit, rlArg, rARG1);
+ break;
+ case 1: // Is "this" null? [use rARG1]
+ genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+ // get this->klass_ [use rARG1, set rINVOKE_TGT]
+ loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(),
+ rINVOKE_TGT);
+ break;
+ case 2: // Get this->klass_->vtable [usr rINVOKE_TGT, set rINVOKE_TGT]
+ loadWordDisp(cUnit, rINVOKE_TGT, Class::VTableOffset().Int32Value(),
+ rINVOKE_TGT);
+ break;
+ case 3: // Get target method [use rINVOKE_TGT, set rARG0]
+ loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
+ Array::DataOffset(sizeof(Object*)).Int32Value(), rARG0);
+ break;
#if !defined(TARGET_X86)
- case 4: // Get the compiled code address [uses rARG0, sets rINVOKE_TGT]
- loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
- rINVOKE_TGT);
- break;
+ case 4: // Get the compiled code address [uses rARG0, sets rINVOKE_TGT]
+ loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
+ rINVOKE_TGT);
+ break;
#endif
- default:
- return -1;
- }
- return state + 1;
+ default:
+ return -1;
+ }
+ return state + 1;
}
int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
int state, uint32_t dexIdx, uint32_t methodIdx)
{
- /*
- * This handles the case in which the base method is not fully
- * resolved at compile time, we bail to a runtime helper.
- */
- if (state == 0) {
+ /*
+ * This handles the case in which the base method is not fully
+ * resolved at compile time, we bail to a runtime helper.
+ */
+ if (state == 0) {
#if !defined(TARGET_X86)
- // Load trampoline target
- loadWordDisp(cUnit, rSELF, trampoline, rINVOKE_TGT);
+ // Load trampoline target
+ loadWordDisp(cUnit, rSELF, trampoline, rINVOKE_TGT);
#endif
- // Load rARG0 with method index
- loadConstant(cUnit, rARG0, dexIdx);
- return 1;
- }
- return -1;
+ // Load rARG0 with method index
+ loadConstant(cUnit, rARG0, dexIdx);
+ return 1;
+ }
+ return -1;
}
int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
@@ -357,28 +359,28 @@
uint32_t methodIdx, uintptr_t directCode,
uintptr_t directMethod, InvokeType type, bool skipThis)
{
- int lastArgReg = rARG3;
- int nextReg = rARG1;
- int nextArg = 0;
- if (skipThis) {
- nextReg++;
- nextArg++;
+ int lastArgReg = rARG3;
+ int nextReg = rARG1;
+ int nextArg = 0;
+ if (skipThis) {
+ nextReg++;
+ nextArg++;
+ }
+ for (; (nextReg <= lastArgReg) && (nextArg < mir->ssaRep->numUses); nextReg++) {
+ RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
+ rlArg = oatUpdateRawLoc(cUnit, rlArg);
+ if (rlArg.wide && (nextReg <= rARG2)) {
+ loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
+ nextReg++;
+ nextArg++;
+ } else {
+ rlArg.wide = false;
+ loadValueDirectFixed(cUnit, rlArg, nextReg);
}
- for (; (nextReg <= lastArgReg) && (nextArg < mir->ssaRep->numUses); nextReg++) {
- RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
- rlArg = oatUpdateRawLoc(cUnit, rlArg);
- if (rlArg.wide && (nextReg <= rARG2)) {
- loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
- nextReg++;
- nextArg++;
- } else {
- rlArg.wide = false;
- loadValueDirectFixed(cUnit, rlArg, nextReg);
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- }
- return callState;
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ }
+ return callState;
}
/*
@@ -395,86 +397,83 @@
uintptr_t directCode, uintptr_t directMethod,
InvokeType type, bool skipThis)
{
- RegLocation rlArg;
+ RegLocation rlArg;
- /* If no arguments, just return */
- if (dInsn->vA == 0)
- return callState;
-
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
-
- DCHECK_LE(dInsn->vA, 5U);
- if (dInsn->vA > 3) {
- uint32_t nextUse = 3;
- //Detect special case of wide arg spanning arg3/arg4
- RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
- RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
- RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
- if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
- rlUse2.wide) {
- int reg = -1;
- // Wide spans, we need the 2nd half of uses[2].
- rlArg = oatUpdateLocWide(cUnit, rlUse2);
- if (rlArg.location == kLocPhysReg) {
- reg = rlArg.highReg;
- } else {
- // rARG2 & rARG3 can safely be used here
- reg = rARG3;
- loadWordDisp(cUnit, rSP,
- oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx,
- methodIdx, directCode, directMethod,
- type);
- }
- storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
- storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- nextUse++;
- }
- // Loop through the rest
- while (nextUse < dInsn->vA) {
- int lowReg;
- int highReg = -1;
- rlArg = oatGetRawSrc(cUnit, mir, nextUse);
- rlArg = oatUpdateRawLoc(cUnit, rlArg);
- if (rlArg.location == kLocPhysReg) {
- lowReg = rlArg.lowReg;
- highReg = rlArg.highReg;
- } else {
- lowReg = rARG2;
- if (rlArg.wide) {
- highReg = rARG3;
- loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
- } else {
- loadValueDirectFixed(cUnit, rlArg, lowReg);
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx,
- methodIdx, directCode, directMethod,
- type);
- }
- int outsOffset = (nextUse + 1) * 4;
- if (rlArg.wide) {
- storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
- nextUse += 2;
- } else {
- storeWordDisp(cUnit, rSP, outsOffset, lowReg);
- nextUse++;
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- }
- }
-
- callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
- dexIdx, methodIdx, directCode, directMethod,
- type, skipThis);
-
- if (pcrLabel) {
- *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
- }
+ /* If no arguments, just return */
+ if (dInsn->vA == 0)
return callState;
+
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+
+ DCHECK_LE(dInsn->vA, 5U);
+ if (dInsn->vA > 3) {
+ uint32_t nextUse = 3;
+ //Detect special case of wide arg spanning arg3/arg4
+ RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
+ RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
+ RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
+ if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
+ rlUse2.wide) {
+ int reg = -1;
+ // Wide spans, we need the 2nd half of uses[2].
+ rlArg = oatUpdateLocWide(cUnit, rlUse2);
+ if (rlArg.location == kLocPhysReg) {
+ reg = rlArg.highReg;
+ } else {
+ // rARG2 & rARG3 can safely be used here
+ reg = rARG3;
+ loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx,
+ methodIdx, directCode, directMethod, type);
+ }
+ storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
+ storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ nextUse++;
+ }
+ // Loop through the rest
+ while (nextUse < dInsn->vA) {
+ int lowReg;
+ int highReg = -1;
+ rlArg = oatGetRawSrc(cUnit, mir, nextUse);
+ rlArg = oatUpdateRawLoc(cUnit, rlArg);
+ if (rlArg.location == kLocPhysReg) {
+ lowReg = rlArg.lowReg;
+ highReg = rlArg.highReg;
+ } else {
+ lowReg = rARG2;
+ if (rlArg.wide) {
+ highReg = rARG3;
+ loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
+ } else {
+ loadValueDirectFixed(cUnit, rlArg, lowReg);
+ }
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx,
+ methodIdx, directCode, directMethod, type);
+ }
+ int outsOffset = (nextUse + 1) * 4;
+ if (rlArg.wide) {
+ storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
+ nextUse += 2;
+ } else {
+ storeWordDisp(cUnit, rSP, outsOffset, lowReg);
+ nextUse++;
+ }
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ }
+ }
+
+ callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
+ dexIdx, methodIdx, directCode, directMethod,
+ type, skipThis);
+
+ if (pcrLabel) {
+ *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+ }
+ return callState;
}
/*
@@ -499,297 +498,295 @@
uintptr_t directCode, uintptr_t directMethod,
InvokeType type, bool skipThis)
{
- int firstArg = dInsn->vC;
- int numArgs = dInsn->vA;
+ int firstArg = dInsn->vC;
+ int numArgs = dInsn->vA;
- // If we can treat it as non-range (Jumbo ops will use range form)
- if (numArgs <= 5)
- return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
- nextCallInsn, dexIdx, methodIdx,
- directCode, directMethod, type, skipThis);
- /*
- * Make sure range list doesn't span the break between in normal
- * Dalvik vRegs and the ins.
- */
- int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
- int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
- if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
- LOG(FATAL) << "Argument list spanned locals & args";
+ // If we can treat it as non-range (Jumbo ops will use range form)
+ if (numArgs <= 5)
+ return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
+ nextCallInsn, dexIdx, methodIdx,
+ directCode, directMethod, type, skipThis);
+ /*
+ * Make sure range list doesn't span the break between in normal
+ * Dalvik vRegs and the ins.
+ */
+ int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
+ int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
+ if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
+ LOG(FATAL) << "Argument list spanned locals & args";
+ }
+
+ /*
+ * First load the non-register arguments. Both forms expect all
+ * of the source arguments to be in their home frame location, so
+ * scan the sReg names and flush any that have been promoted to
+ * frame backing storage.
+ */
+ // Scan the rest of the args - if in physReg flush to memory
+ for (int nextArg = 0; nextArg < numArgs;) {
+ RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
+ if (loc.wide) {
+ loc = oatUpdateLocWide(cUnit, loc);
+ if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
+ storeBaseDispWide(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
+ loc.lowReg, loc.highReg);
+ }
+ nextArg += 2;
+ } else {
+ loc = oatUpdateLoc(cUnit, loc);
+ if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
+ storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
+ loc.lowReg, kWord);
+ }
+ nextArg++;
}
+ }
- /*
- * First load the non-register arguments. Both forms expect all
- * of the source arguments to be in their home frame location, so
- * scan the sReg names and flush any that have been promoted to
- * frame backing storage.
- */
- // Scan the rest of the args - if in physReg flush to memory
- for (int nextArg = 0; nextArg < numArgs;) {
- RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
- if (loc.wide) {
- loc = oatUpdateLocWide(cUnit, loc);
- if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
- storeBaseDispWide(cUnit, rSP,
- oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, loc.highReg);
- }
- nextArg += 2;
- } else {
- loc = oatUpdateLoc(cUnit, loc);
- if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
- }
- nextArg++;
- }
- }
-
- int startOffset = oatSRegOffset(cUnit,
- cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
- int outsOffset = 4 /* Method* */ + (3 * 4);
+ int startOffset = oatSRegOffset(cUnit,
+ cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
+ int outsOffset = 4 /* Method* */ + (3 * 4);
#if defined(TARGET_MIPS) || defined(TARGET_X86)
+ // Generate memcpy
+ opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
+ opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
+ callRuntimeHelperRegRegImm(cUnit, ENTRYPOINT_OFFSET(pMemcpy),
+ rARG0, rARG1, (numArgs - 3) * 4);
+#else
+ if (numArgs >= 20) {
// Generate memcpy
opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
callRuntimeHelperRegRegImm(cUnit, ENTRYPOINT_OFFSET(pMemcpy),
rARG0, rARG1, (numArgs - 3) * 4);
-#else
- if (numArgs >= 20) {
- // Generate memcpy
- opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
- opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
- callRuntimeHelperRegRegImm(cUnit, ENTRYPOINT_OFFSET(pMemcpy),
- rARG0, rARG1, (numArgs - 3) * 4);
- } else {
- // Use vldm/vstm pair using rARG3 as a temp
- int regsLeft = std::min(numArgs - 3, 16);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- opRegRegImm(cUnit, kOpAdd, rARG3, rSP, startOffset);
- LIR* ld = newLIR3(cUnit, kThumb2Vldms, rARG3, fr0, regsLeft);
- //TUNING: loosen barrier
- ld->defMask = ENCODE_ALL;
- setMemRefType(ld, true /* isLoad */, kDalvikReg);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- opRegRegImm(cUnit, kOpAdd, rARG3, rSP, 4 /* Method* */ + (3 * 4));
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
- LIR* st = newLIR3(cUnit, kThumb2Vstms, rARG3, fr0, regsLeft);
- setMemRefType(st, false /* isLoad */, kDalvikReg);
- st->defMask = ENCODE_ALL;
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
- directCode, directMethod, type);
-
- }
-#endif
-
- callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
- dexIdx, methodIdx, directCode, directMethod,
- type, skipThis);
-
+ } else {
+ // Use vldm/vstm pair using rARG3 as a temp
+ int regsLeft = std::min(numArgs - 3, 16);
callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
directCode, directMethod, type);
- if (pcrLabel) {
- *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
- }
- return callState;
+ opRegRegImm(cUnit, kOpAdd, rARG3, rSP, startOffset);
+ LIR* ld = newLIR3(cUnit, kThumb2Vldms, rARG3, fr0, regsLeft);
+ //TUNING: loosen barrier
+ ld->defMask = ENCODE_ALL;
+ setMemRefType(ld, true /* isLoad */, kDalvikReg);
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ opRegRegImm(cUnit, kOpAdd, rARG3, rSP, 4 /* Method* */ + (3 * 4));
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ LIR* st = newLIR3(cUnit, kThumb2Vstms, rARG3, fr0, regsLeft);
+ setMemRefType(st, false /* isLoad */, kDalvikReg);
+ st->defMask = ENCODE_ALL;
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+
+ }
+#endif
+
+ callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
+ dexIdx, methodIdx, directCode, directMethod,
+ type, skipThis);
+
+ callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx,
+ directCode, directMethod, type);
+ if (pcrLabel) {
+ *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), rARG1, mir);
+ }
+ return callState;
}
RegLocation inlineTarget(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
{
- RegLocation res;
- mir = oatFindMoveResult(cUnit, bb, mir, false);
- if (mir == NULL) {
- res = oatGetReturn(cUnit, false);
- } else {
- res = oatGetDest(cUnit, mir, 0);
- mir->dalvikInsn.opcode = Instruction::NOP;
- }
- return res;
+ RegLocation res;
+ mir = oatFindMoveResult(cUnit, bb, mir, false);
+ if (mir == NULL) {
+ res = oatGetReturn(cUnit, false);
+ } else {
+ res = oatGetDest(cUnit, mir, 0);
+ mir->dalvikInsn.opcode = Instruction::NOP;
+ }
+ return res;
}
RegLocation inlineTargetWide(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
{
- RegLocation res;
- mir = oatFindMoveResult(cUnit, bb, mir, true);
- if (mir == NULL) {
- res = oatGetReturnWide(cUnit, false);
- } else {
- res = oatGetDestWide(cUnit, mir, 0, 1);
- mir->dalvikInsn.opcode = Instruction::NOP;
- }
- return res;
+ RegLocation res;
+ mir = oatFindMoveResult(cUnit, bb, mir, true);
+ if (mir == NULL) {
+ res = oatGetReturnWide(cUnit, false);
+ } else {
+ res = oatGetDestWide(cUnit, mir, 0, 1);
+ mir->dalvikInsn.opcode = Instruction::NOP;
+ }
+ return res;
}
bool genInlinedCharAt(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- InvokeType type, bool isRange)
+ InvokeType type, bool isRange)
{
#if defined(TARGET_ARM)
- // Location of reference to data array
- int valueOffset = String::ValueOffset().Int32Value();
- // Location of count
- int countOffset = String::CountOffset().Int32Value();
- // Starting offset within data array
- int offsetOffset = String::OffsetOffset().Int32Value();
- // Start of char data with array_
- int dataOffset = Array::DataOffset(sizeof(uint16_t)).Int32Value();
+ // Location of reference to data array
+ int valueOffset = String::ValueOffset().Int32Value();
+ // Location of count
+ int countOffset = String::CountOffset().Int32Value();
+ // Starting offset within data array
+ int offsetOffset = String::OffsetOffset().Int32Value();
+  // Start of char data within array_
+ int dataOffset = Array::DataOffset(sizeof(uint16_t)).Int32Value();
- RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
- RegLocation rlIdx = oatGetSrc(cUnit, mir, 1);
- rlObj = loadValue(cUnit, rlObj, kCoreReg);
- rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
- int regMax;
- int regOff = oatAllocTemp(cUnit);
- int regPtr = oatAllocTemp(cUnit);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);
- bool rangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
- if (rangeCheck) {
- regMax = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rlObj.lowReg, countOffset, regMax);
- }
- loadWordDisp(cUnit, rlObj.lowReg, offsetOffset, regOff);
- loadWordDisp(cUnit, rlObj.lowReg, valueOffset, regPtr);
- LIR* launchPad = NULL;
- if (rangeCheck) {
- // Set up a launch pad to allow retry in case of bounds violation */
- launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
- oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
- (intptr_t)launchPad);
- opRegReg(cUnit, kOpCmp, rlIdx.lowReg, regMax);
- oatFreeTemp(cUnit, regMax);
- opCondBranch(cUnit, kCondCs, launchPad);
- }
- opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
- opRegReg(cUnit, kOpAdd, regOff, rlIdx.lowReg);
- RegLocation rlDest = inlineTarget(cUnit, bb, mir);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadBaseIndexed(cUnit, regPtr, regOff, rlResult.lowReg, 1, kUnsignedHalf);
- oatFreeTemp(cUnit, regOff);
- oatFreeTemp(cUnit, regPtr);
- storeValue(cUnit, rlDest, rlResult);
- if (rangeCheck) {
- launchPad->operands[2] = NULL; // no resumption
- launchPad->operands[3] = (uintptr_t)bb;
- }
- // Record that we've already inlined & null checked
- mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
- return true;
+ RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlIdx = oatGetSrc(cUnit, mir, 1);
+ rlObj = loadValue(cUnit, rlObj, kCoreReg);
+ rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
+ int regMax;
+ int regOff = oatAllocTemp(cUnit);
+ int regPtr = oatAllocTemp(cUnit);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);
+ bool rangeCheck = (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK));
+ if (rangeCheck) {
+ regMax = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rlObj.lowReg, countOffset, regMax);
+ }
+ loadWordDisp(cUnit, rlObj.lowReg, offsetOffset, regOff);
+ loadWordDisp(cUnit, rlObj.lowReg, valueOffset, regPtr);
+ LIR* launchPad = NULL;
+ if (rangeCheck) {
+    // Set up a launch pad to allow retry in case of bounds violation
+ launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
+ oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
+ (intptr_t)launchPad);
+ opRegReg(cUnit, kOpCmp, rlIdx.lowReg, regMax);
+ oatFreeTemp(cUnit, regMax);
+ opCondBranch(cUnit, kCondCs, launchPad);
+ }
+ opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
+ opRegReg(cUnit, kOpAdd, regOff, rlIdx.lowReg);
+ RegLocation rlDest = inlineTarget(cUnit, bb, mir);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadBaseIndexed(cUnit, regPtr, regOff, rlResult.lowReg, 1, kUnsignedHalf);
+ oatFreeTemp(cUnit, regOff);
+ oatFreeTemp(cUnit, regPtr);
+ storeValue(cUnit, rlDest, rlResult);
+ if (rangeCheck) {
+ launchPad->operands[2] = NULL; // no resumption
+ launchPad->operands[3] = (uintptr_t)bb;
+ }
+ // Record that we've already inlined & null checked
+ mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genInlinedMinMaxInt(CompilationUnit *cUnit, BasicBlock* bb, MIR *mir,
- bool isMin)
+ bool isMin)
{
#if defined(TARGET_ARM)
- RegLocation rlSrc1 = oatGetSrc(cUnit, mir, 0);
- RegLocation rlSrc2 = oatGetSrc(cUnit, mir, 1);
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- RegLocation rlDest = inlineTarget(cUnit, bb, mir);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- opIT(cUnit, (isMin) ? kArmCondGt : kArmCondLt, "E");
- opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
- opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
- genBarrier(cUnit);
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ RegLocation rlSrc1 = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlSrc2 = oatGetSrc(cUnit, mir, 1);
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlDest = inlineTarget(cUnit, bb, mir);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
+ opIT(cUnit, (isMin) ? kArmCondGt : kArmCondLt, "E");
+ opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
+ opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
+ genBarrier(cUnit);
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
#else
- return false;
+ return false;
#endif
}
// Generates an inlined String.isEmpty or String.length.
bool genInlinedStringIsEmptyOrLength(CompilationUnit* cUnit,
- BasicBlock* bb, MIR* mir,
- bool isEmpty)
+ BasicBlock* bb, MIR* mir, bool isEmpty)
{
#if defined(TARGET_ARM)
- // dst = src.length();
- RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
- rlObj = loadValue(cUnit, rlObj, kCoreReg);
- RegLocation rlDest = inlineTarget(cUnit, bb, mir);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);
- loadWordDisp(cUnit, rlObj.lowReg, String::CountOffset().Int32Value(),
- rlResult.lowReg);
- if (isEmpty) {
- // dst = (dst == 0);
- int tReg = oatAllocTemp(cUnit);
- opRegReg(cUnit, kOpNeg, tReg, rlResult.lowReg);
- opRegRegReg(cUnit, kOpAdc, rlResult.lowReg, rlResult.lowReg, tReg);
- }
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ // dst = src.length();
+ RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
+ rlObj = loadValue(cUnit, rlObj, kCoreReg);
+ RegLocation rlDest = inlineTarget(cUnit, bb, mir);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);
+ loadWordDisp(cUnit, rlObj.lowReg, String::CountOffset().Int32Value(),
+ rlResult.lowReg);
+ if (isEmpty) {
+ // dst = (dst == 0);
+ int tReg = oatAllocTemp(cUnit);
+ opRegReg(cUnit, kOpNeg, tReg, rlResult.lowReg);
+ opRegRegReg(cUnit, kOpAdc, rlResult.lowReg, rlResult.lowReg, tReg);
+ }
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genInlinedAbsInt(CompilationUnit *cUnit, BasicBlock* bb, MIR *mir)
{
#if defined(TARGET_ARM)
- RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlDest = inlineTarget(cUnit, bb, mir);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- int signReg = oatAllocTemp(cUnit);
- // abs(x) = y<=x>>31, (x+y)^y.
- opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.lowReg, 31);
- opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
- opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ RegLocation rlDest = inlineTarget(cUnit, bb, mir);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ int signReg = oatAllocTemp(cUnit);
+ // abs(x) = y<=x>>31, (x+y)^y.
+ opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.lowReg, 31);
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
+ opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genInlinedAbsLong(CompilationUnit *cUnit, BasicBlock* bb, MIR *mir)
{
#if defined(TARGET_ARM)
- RegLocation rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlDest = inlineTargetWide(cUnit, bb, mir);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- int signReg = oatAllocTemp(cUnit);
- // abs(x) = y<=x>>31, (x+y)^y.
- opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.highReg, 31);
- opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
- opRegRegReg(cUnit, kOpAdc, rlResult.highReg, rlSrc.highReg, signReg);
- opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
- opRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return true;
+ RegLocation rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ RegLocation rlDest = inlineTargetWide(cUnit, bb, mir);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ int signReg = oatAllocTemp(cUnit);
+ // abs(x) = y<=x>>31, (x+y)^y.
+ opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.highReg, 31);
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
+ opRegRegReg(cUnit, kOpAdc, rlResult.highReg, rlSrc.highReg, signReg);
+ opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
+ opRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genInlinedFloatCvt(CompilationUnit *cUnit, BasicBlock* bb, MIR *mir)
{
#if defined(TARGET_ARM)
- RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
- RegLocation rlDest = inlineTarget(cUnit, bb, mir);
- storeValue(cUnit, rlDest, rlSrc);
- return true;
+ RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlDest = inlineTarget(cUnit, bb, mir);
+ storeValue(cUnit, rlDest, rlSrc);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genInlinedDoubleCvt(CompilationUnit *cUnit, BasicBlock* bb, MIR *mir)
{
#if defined(TARGET_ARM)
- RegLocation rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- RegLocation rlDest = inlineTargetWide(cUnit, bb, mir);
- storeValueWide(cUnit, rlDest, rlSrc);
- return true;
+ RegLocation rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ RegLocation rlDest = inlineTargetWide(cUnit, bb, mir);
+ storeValueWide(cUnit, rlDest, rlSrc);
+ return true;
#else
- return false;
+ return false;
#endif
}
@@ -802,37 +799,37 @@
{
#if defined(TARGET_ARM)
- oatClobberCalleeSave(cUnit);
- oatLockCallTemps(cUnit); // Using fixed registers
- int regPtr = rARG0;
- int regChar = rARG1;
- int regStart = rARG2;
+ oatClobberCalleeSave(cUnit);
+ oatLockCallTemps(cUnit); // Using fixed registers
+ int regPtr = rARG0;
+ int regChar = rARG1;
+ int regStart = rARG2;
- RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
- RegLocation rlChar = oatGetSrc(cUnit, mir, 1);
- RegLocation rlStart = oatGetSrc(cUnit, mir, 2);
- loadValueDirectFixed(cUnit, rlObj, regPtr);
- loadValueDirectFixed(cUnit, rlChar, regChar);
- if (zeroBased) {
- loadConstant(cUnit, regStart, 0);
- } else {
- loadValueDirectFixed(cUnit, rlStart, regStart);
- }
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pIndexOf));
- genNullCheck(cUnit, rlObj.sRegLow, regPtr, mir);
- LIR* launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
- oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
- (intptr_t)launchPad);
- opCmpImmBranch(cUnit, kCondGt, regChar, 0xFFFF, launchPad);
- opReg(cUnit, kOpBlx, rTgt);
- LIR* resumeTgt = newLIR0(cUnit, kPseudoTargetLabel);
- launchPad->operands[2] = (uintptr_t)resumeTgt;
- launchPad->operands[3] = (uintptr_t)bb;
- // Record that we've already inlined & null checked
- mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
- return true;
+ RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlChar = oatGetSrc(cUnit, mir, 1);
+ RegLocation rlStart = oatGetSrc(cUnit, mir, 2);
+ loadValueDirectFixed(cUnit, rlObj, regPtr);
+ loadValueDirectFixed(cUnit, rlChar, regChar);
+ if (zeroBased) {
+ loadConstant(cUnit, regStart, 0);
+ } else {
+ loadValueDirectFixed(cUnit, rlStart, regStart);
+ }
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pIndexOf));
+ genNullCheck(cUnit, rlObj.sRegLow, regPtr, mir);
+ LIR* launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
+ oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
+ (intptr_t)launchPad);
+ opCmpImmBranch(cUnit, kCondGt, regChar, 0xFFFF, launchPad);
+ opReg(cUnit, kOpBlx, rTgt);
+ LIR* resumeTgt = newLIR0(cUnit, kPseudoTargetLabel);
+ launchPad->operands[2] = (uintptr_t)resumeTgt;
+ launchPad->operands[3] = (uintptr_t)bb;
+ // Record that we've already inlined & null checked
+ mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ return true;
#else
- return false;
+ return false;
#endif
}
@@ -841,93 +838,93 @@
MIR* mir, InvokeType type)
{
#if defined(TARGET_ARM)
- oatClobberCalleeSave(cUnit);
- oatLockCallTemps(cUnit); // Using fixed registers
- int regThis = rARG0;
- int regCmp = rARG1;
+ oatClobberCalleeSave(cUnit);
+ oatLockCallTemps(cUnit); // Using fixed registers
+ int regThis = rARG0;
+ int regCmp = rARG1;
- RegLocation rlThis = oatGetSrc(cUnit, mir, 0);
- RegLocation rlCmp = oatGetSrc(cUnit, mir, 1);
- loadValueDirectFixed(cUnit, rlThis, regThis);
- loadValueDirectFixed(cUnit, rlCmp, regCmp);
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pStringCompareTo));
- genNullCheck(cUnit, rlThis.sRegLow, regThis, mir);
- //TUNING: check if rlCmp.sRegLow is already null checked
- LIR* launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
- oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
- (intptr_t)launchPad);
- opCmpImmBranch(cUnit, kCondEq, regCmp, 0, launchPad);
- opReg(cUnit, kOpBlx, rTgt);
- launchPad->operands[2] = NULL; // No return possible
- launchPad->operands[3] = (uintptr_t)bb;
- // Record that we've already inlined & null checked
- mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
- return true;
+ RegLocation rlThis = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlCmp = oatGetSrc(cUnit, mir, 1);
+ loadValueDirectFixed(cUnit, rlThis, regThis);
+ loadValueDirectFixed(cUnit, rlCmp, regCmp);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pStringCompareTo));
+ genNullCheck(cUnit, rlThis.sRegLow, regThis, mir);
+ //TUNING: check if rlCmp.sRegLow is already null checked
+ LIR* launchPad = rawLIR(cUnit, 0, kPseudoIntrinsicRetry, (int)mir, type);
+ oatInsertGrowableList(cUnit, &cUnit->intrinsicLaunchpads,
+ (intptr_t)launchPad);
+ opCmpImmBranch(cUnit, kCondEq, regCmp, 0, launchPad);
+ opReg(cUnit, kOpBlx, rTgt);
+ launchPad->operands[2] = NULL; // No return possible
+ launchPad->operands[3] = (uintptr_t)bb;
+ // Record that we've already inlined & null checked
+ mir->optimizationFlags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ return true;
#else
- return false;
+ return false;
#endif
}
bool genIntrinsic(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
InvokeType type, bool isRange)
{
- if ((mir->optimizationFlags & MIR_INLINED) || isRange) {
- return false;
- }
- /*
- * TODO: move these to a target-specific structured constant array
- * and use a generic match function. The list of intrinsics may be
- * slightly different depending on target.
- * TODO: Fold this into a matching function that runs during
- * basic block building. This should be part of the action for
- * small method inlining and recognition of the special object init
- * method. By doing this during basic block construction, we can also
- * take advantage of/generate new useful dataflow info.
- */
- std::string tgtMethod(PrettyMethod(mir->dalvikInsn.vB, *cUnit->dex_file));
- if (tgtMethod.compare("char java.lang.String.charAt(int)") == 0) {
- return genInlinedCharAt(cUnit, bb, mir, type, isRange);
- }
- if (tgtMethod.compare("int java.lang.Math.min(int, int)") == 0) {
- return genInlinedMinMaxInt(cUnit, bb, mir, true /* isMin */);
- }
- if (tgtMethod.compare("int java.lang.Math.max(int, int)") == 0) {
- return genInlinedMinMaxInt(cUnit, bb, mir, false /* isMin */);
- }
- if (tgtMethod.compare("int java.lang.String.length()") == 0) {
- return genInlinedStringIsEmptyOrLength(cUnit, bb, mir, false /* isEmpty */);
- }
- if (tgtMethod.compare("boolean java.lang.String.isEmpty()") == 0) {
- return genInlinedStringIsEmptyOrLength(cUnit, bb, mir, true /* isEmpty */);
- }
- if (tgtMethod.compare("int java.lang.Math.abs(int)") == 0) {
- return genInlinedAbsInt(cUnit, bb, mir);
- }
- if (tgtMethod.compare("long java.lang.Math.abs(long)") == 0) {
- return genInlinedAbsLong(cUnit, bb, mir);
- }
- if (tgtMethod.compare("int java.lang.Float.floatToRawIntBits(float)") == 0) {
- return genInlinedFloatCvt(cUnit, bb, mir);
- }
- if (tgtMethod.compare("float java.lang.Float.intBitsToFloat(int)") == 0) {
- return genInlinedFloatCvt(cUnit, bb, mir);
- }
- if (tgtMethod.compare("long java.lang.Double.doubleToRawLongBits(double)") == 0) {
- return genInlinedDoubleCvt(cUnit, bb, mir);
- }
- if (tgtMethod.compare("double java.lang.Double.longBitsToDouble(long)") == 0) {
- return genInlinedDoubleCvt(cUnit, bb, mir);
- }
- if (tgtMethod.compare("int java.lang.String.indexOf(int, int)") == 0) {
- return genInlinedIndexOf(cUnit, bb, mir, type, false /* base 0 */);
- }
- if (tgtMethod.compare("int java.lang.String.indexOf(int)") == 0) {
- return genInlinedIndexOf(cUnit, bb, mir, type, true /* base 0 */);
- }
- if (tgtMethod.compare("int java.lang.String.compareTo(java.lang.String)") == 0) {
- return genInlinedStringCompareTo(cUnit, bb, mir, type);
- }
+ if ((mir->optimizationFlags & MIR_INLINED) || isRange) {
return false;
+ }
+ /*
+ * TODO: move these to a target-specific structured constant array
+ * and use a generic match function. The list of intrinsics may be
+ * slightly different depending on target.
+ * TODO: Fold this into a matching function that runs during
+ * basic block building. This should be part of the action for
+ * small method inlining and recognition of the special object init
+ * method. By doing this during basic block construction, we can also
+ * take advantage of/generate new useful dataflow info.
+ */
+ std::string tgtMethod(PrettyMethod(mir->dalvikInsn.vB, *cUnit->dex_file));
+ if (tgtMethod.compare("char java.lang.String.charAt(int)") == 0) {
+ return genInlinedCharAt(cUnit, bb, mir, type, isRange);
+ }
+ if (tgtMethod.compare("int java.lang.Math.min(int, int)") == 0) {
+ return genInlinedMinMaxInt(cUnit, bb, mir, true /* isMin */);
+ }
+ if (tgtMethod.compare("int java.lang.Math.max(int, int)") == 0) {
+ return genInlinedMinMaxInt(cUnit, bb, mir, false /* isMin */);
+ }
+ if (tgtMethod.compare("int java.lang.String.length()") == 0) {
+ return genInlinedStringIsEmptyOrLength(cUnit, bb, mir, false /* isEmpty */);
+ }
+ if (tgtMethod.compare("boolean java.lang.String.isEmpty()") == 0) {
+ return genInlinedStringIsEmptyOrLength(cUnit, bb, mir, true /* isEmpty */);
+ }
+ if (tgtMethod.compare("int java.lang.Math.abs(int)") == 0) {
+ return genInlinedAbsInt(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("long java.lang.Math.abs(long)") == 0) {
+ return genInlinedAbsLong(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("int java.lang.Float.floatToRawIntBits(float)") == 0) {
+ return genInlinedFloatCvt(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("float java.lang.Float.intBitsToFloat(int)") == 0) {
+ return genInlinedFloatCvt(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("long java.lang.Double.doubleToRawLongBits(double)") == 0) {
+ return genInlinedDoubleCvt(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("double java.lang.Double.longBitsToDouble(long)") == 0) {
+ return genInlinedDoubleCvt(cUnit, bb, mir);
+ }
+ if (tgtMethod.compare("int java.lang.String.indexOf(int, int)") == 0) {
+ return genInlinedIndexOf(cUnit, bb, mir, type, false /* base 0 */);
+ }
+ if (tgtMethod.compare("int java.lang.String.indexOf(int)") == 0) {
+ return genInlinedIndexOf(cUnit, bb, mir, type, true /* base 0 */);
+ }
+ if (tgtMethod.compare("int java.lang.String.compareTo(java.lang.String)") == 0) {
+ return genInlinedStringCompareTo(cUnit, bb, mir, type);
+ }
+ return false;
}
diff --git a/src/compiler/codegen/LocalOptimizations.cc b/src/compiler/codegen/LocalOptimizations.cc
index b7a7e57..55ba03a 100644
--- a/src/compiler/codegen/LocalOptimizations.cc
+++ b/src/compiler/codegen/LocalOptimizations.cc
@@ -29,28 +29,28 @@
inline bool isDalvikRegisterClobbered(LIR* lir1, LIR* lir2)
{
- int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
- int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
- int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
- int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
+ int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->aliasInfo);
+ int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->aliasInfo);
+ int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->aliasInfo);
+ int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->aliasInfo);
- return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
+ return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}
/* Convert a more expensive instruction (ie load) into a move */
void convertMemOpIntoMove(CompilationUnit* cUnit, LIR* origLIR, int dest,
int src)
{
- /* Insert a move to replace the load */
- LIR* moveLIR;
- moveLIR = oatRegCopyNoInsert( cUnit, dest, src);
- /*
- * Insert the converted instruction after the original since the
- * optimization is scannng in the top-down order and the new instruction
- * will need to be re-checked (eg the new dest clobbers the src used in
- * thisLIR).
- */
- oatInsertLIRAfter((LIR*) origLIR, (LIR*) moveLIR);
+ /* Insert a move to replace the load */
+ LIR* moveLIR;
+ moveLIR = oatRegCopyNoInsert( cUnit, dest, src);
+ /*
+ * Insert the converted instruction after the original since the
+   * optimization is scanning in the top-down order and the new instruction
+ * will need to be re-checked (eg the new dest clobbers the src used in
+ * thisLIR).
+ */
+ oatInsertLIRAfter((LIR*) origLIR, (LIR*) moveLIR);
}
/*
@@ -74,185 +74,174 @@
void applyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR,
LIR* tailLIR)
{
- LIR* thisLIR;
+ LIR* thisLIR;
- if (headLIR == tailLIR) return;
+ if (headLIR == tailLIR) return;
- for (thisLIR = PREV_LIR(tailLIR);
- thisLIR != headLIR;
- thisLIR = PREV_LIR(thisLIR)) {
- int sinkDistance = 0;
+ for (thisLIR = PREV_LIR(tailLIR);
+ thisLIR != headLIR;
+ thisLIR = PREV_LIR(thisLIR)) {
+ int sinkDistance = 0;
- /* Skip non-interesting instructions */
- if ((thisLIR->flags.isNop == true) ||
- isPseudoOpcode(thisLIR->opcode) ||
- !(EncodingMap[thisLIR->opcode].flags & (IS_LOAD | IS_STORE))) {
- continue;
- }
+ /* Skip non-interesting instructions */
+ if ((thisLIR->flags.isNop == true) ||
+ isPseudoOpcode(thisLIR->opcode) ||
+ !(EncodingMap[thisLIR->opcode].flags & (IS_LOAD | IS_STORE))) {
+ continue;
+ }
- int nativeRegId = thisLIR->operands[0];
- bool isThisLIRLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
- LIR* checkLIR;
- /* Use the mem mask to determine the rough memory location */
- u8 thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
+ int nativeRegId = thisLIR->operands[0];
+ bool isThisLIRLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
+ LIR* checkLIR;
+ /* Use the mem mask to determine the rough memory location */
+ u8 thisMemMask = (thisLIR->useMask | thisLIR->defMask) & ENCODE_MEM;
- /*
- * Currently only eliminate redundant ld/st for constant and Dalvik
- * register accesses.
- */
- if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+ /*
+ * Currently only eliminate redundant ld/st for constant and Dalvik
+ * register accesses.
+ */
+ if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
// FIXME: make sure we have a branch barrier for x86
#if defined(TARGET_X86)
- u8 stopUseRegMask = (thisLIR->useMask) & ~ENCODE_MEM;
+ u8 stopUseRegMask = (thisLIR->useMask) & ~ENCODE_MEM;
#else
- /*
- * Add r15 (pc) to the resource mask to prevent this instruction
- * from sinking past branch instructions. Also take out the memory
- * region bits since stopMask is used to check data/control
- * dependencies.
- */
- u8 stopUseRegMask = (ENCODE_REG_PC | thisLIR->useMask) &
- ~ENCODE_MEM;
+ /*
+ * Add r15 (pc) to the resource mask to prevent this instruction
+ * from sinking past branch instructions. Also take out the memory
+ * region bits since stopMask is used to check data/control
+ * dependencies.
+ */
+ u8 stopUseRegMask = (ENCODE_REG_PC | thisLIR->useMask) &
+ ~ENCODE_MEM;
#endif
- u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+ u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
- for (checkLIR = NEXT_LIR(thisLIR);
- checkLIR != tailLIR;
- checkLIR = NEXT_LIR(checkLIR)) {
+ for (checkLIR = NEXT_LIR(thisLIR);
+ checkLIR != tailLIR;
+ checkLIR = NEXT_LIR(checkLIR)) {
+ /*
+ * Skip already dead instructions (whose dataflow information is
+ * outdated and misleading).
+ */
+ if (checkLIR->flags.isNop) continue;
+
+ u8 checkMemMask = (checkLIR->useMask | checkLIR->defMask) & ENCODE_MEM;
+ u8 aliasCondition = thisMemMask & checkMemMask;
+ bool stopHere = false;
+
+ /*
+ * Potential aliases seen - check the alias relations
+ */
+ if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+ bool isCheckLIRLoad = EncodingMap[checkLIR->opcode].flags & IS_LOAD;
+ if (aliasCondition == ENCODE_LITERAL) {
+ /*
+ * Should only see literal loads in the instruction
+ * stream.
+ */
+ DCHECK(!(EncodingMap[checkLIR->opcode].flags & IS_STORE));
+ /* Same value && same register type */
+ if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
+ REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId)) {
/*
- * Skip already dead instructions (whose dataflow information is
- * outdated and misleading).
+ * Different destination register - insert
+ * a move
*/
- if (checkLIR->flags.isNop) continue;
-
- u8 checkMemMask = (checkLIR->useMask | checkLIR->defMask) &
- ENCODE_MEM;
- u8 aliasCondition = thisMemMask & checkMemMask;
- bool stopHere = false;
-
+ if (checkLIR->operands[0] != nativeRegId) {
+ convertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
+ nativeRegId);
+ }
+ checkLIR->flags.isNop = true;
+ }
+ } else if (aliasCondition == ENCODE_DALVIK_REG) {
+ /* Must alias */
+ if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
+ /* Only optimize compatible registers */
+ bool regCompatible =
+ REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId);
+ if ((isThisLIRLoad && isCheckLIRLoad) ||
+ (!isThisLIRLoad && isCheckLIRLoad)) {
+ /* RAR or RAW */
+ if (regCompatible) {
+ /*
+ * Different destination register -
+ * insert a move
+ */
+ if (checkLIR->operands[0] !=
+ nativeRegId) {
+ convertMemOpIntoMove(cUnit, checkLIR, checkLIR->operands[0],
+ nativeRegId);
+ }
+ checkLIR->flags.isNop = true;
+ } else {
+ /*
+                 * Destinations are of different types -
+ * something complicated going on so
+ * stop looking now.
+ */
+ stopHere = true;
+ }
+ } else if (isThisLIRLoad && !isCheckLIRLoad) {
+ /* WAR - register value is killed */
+ stopHere = true;
+ } else if (!isThisLIRLoad && !isCheckLIRLoad) {
+ /* WAW - nuke the earlier store */
+ thisLIR->flags.isNop = true;
+ stopHere = true;
+ }
+ /* Partial overlap */
+ } else if (isDalvikRegisterClobbered(thisLIR, checkLIR)) {
/*
- * Potential aliases seen - check the alias relations
+ * It is actually ok to continue if checkLIR
+ * is a read. But it is hard to make a test
+ * case for this so we just stop here to be
+ * conservative.
*/
- if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
- bool isCheckLIRLoad = EncodingMap[checkLIR->opcode].flags &
- IS_LOAD;
- if (aliasCondition == ENCODE_LITERAL) {
- /*
- * Should only see literal loads in the instruction
- * stream.
- */
- DCHECK(!(EncodingMap[checkLIR->opcode].flags &
- IS_STORE));
- /* Same value && same register type */
- if (checkLIR->aliasInfo == thisLIR->aliasInfo &&
- REGTYPE(checkLIR->operands[0]) == REGTYPE(nativeRegId)) {
- /*
- * Different destination register - insert
- * a move
- */
- if (checkLIR->operands[0] != nativeRegId) {
- convertMemOpIntoMove(cUnit, checkLIR,
- checkLIR->operands[0],
- nativeRegId);
- }
- checkLIR->flags.isNop = true;
- }
- } else if (aliasCondition == ENCODE_DALVIK_REG) {
- /* Must alias */
- if (checkLIR->aliasInfo == thisLIR->aliasInfo) {
- /* Only optimize compatible registers */
- bool regCompatible =
- REGTYPE(checkLIR->operands[0]) ==
- REGTYPE(nativeRegId);
- if ((isThisLIRLoad && isCheckLIRLoad) ||
- (!isThisLIRLoad && isCheckLIRLoad)) {
- /* RAR or RAW */
- if (regCompatible) {
- /*
- * Different destination register -
- * insert a move
- */
- if (checkLIR->operands[0] !=
- nativeRegId) {
- convertMemOpIntoMove(cUnit,
- checkLIR,
- checkLIR->operands[0],
- nativeRegId);
- }
- checkLIR->flags.isNop = true;
- } else {
- /*
- * Destinaions are of different types -
- * something complicated going on so
- * stop looking now.
- */
- stopHere = true;
- }
- } else if (isThisLIRLoad && !isCheckLIRLoad) {
- /* WAR - register value is killed */
- stopHere = true;
- } else if (!isThisLIRLoad && !isCheckLIRLoad) {
- /* WAW - nuke the earlier store */
- thisLIR->flags.isNop = true;
- stopHere = true;
- }
- /* Partial overlap */
- } else if (isDalvikRegisterClobbered(thisLIR, checkLIR)) {
- /*
- * It is actually ok to continue if checkLIR
- * is a read. But it is hard to make a test
- * case for this so we just stop here to be
- * conservative.
- */
- stopHere = true;
- }
- }
- /* Memory content may be updated. Stop looking now. */
- if (stopHere) {
- break;
- /* The checkLIR has been transformed - check the next one */
- } else if (checkLIR->flags.isNop) {
- continue;
- }
- }
-
-
- /*
- * this and check LIRs have no memory dependency. Now check if
- * their register operands have any RAW, WAR, and WAW
- * dependencies. If so, stop looking.
- */
- if (stopHere == false) {
- stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
- checkLIR);
- }
-
- if (stopHere == true) {
- DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR,
- "REG CLOBBERED"));
- /* Only sink store instructions */
- if (sinkDistance && !isThisLIRLoad) {
- LIR* newStoreLIR =
- (LIR* ) oatNew(cUnit, sizeof(LIR), true,
- kAllocLIR);
- *newStoreLIR = *thisLIR;
- /*
- * Stop point found - insert *before* the checkLIR
- * since the instruction list is scanned in the
- * top-down order.
- */
- oatInsertLIRBefore((LIR*) checkLIR,
- (LIR*) newStoreLIR);
- thisLIR->flags.isNop = true;
- }
- break;
- } else if (!checkLIR->flags.isNop) {
- sinkDistance++;
- }
+ stopHere = true;
+ }
}
+ /* Memory content may be updated. Stop looking now. */
+ if (stopHere) {
+ break;
+ /* The checkLIR has been transformed - check the next one */
+ } else if (checkLIR->flags.isNop) {
+ continue;
+ }
+ }
+
+
+ /*
+ * this and check LIRs have no memory dependency. Now check if
+ * their register operands have any RAW, WAR, and WAW
+ * dependencies. If so, stop looking.
+ */
+ if (stopHere == false) {
+ stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask, checkLIR);
+ }
+
+ if (stopHere == true) {
+ DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR, "REG CLOBBERED"));
+ /* Only sink store instructions */
+ if (sinkDistance && !isThisLIRLoad) {
+ LIR* newStoreLIR =
+ (LIR* ) oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
+ *newStoreLIR = *thisLIR;
+ /*
+ * Stop point found - insert *before* the checkLIR
+ * since the instruction list is scanned in the
+ * top-down order.
+ */
+ oatInsertLIRBefore((LIR*) checkLIR, (LIR*) newStoreLIR);
+ thisLIR->flags.isNop = true;
+ }
+ break;
+ } else if (!checkLIR->flags.isNop) {
+ sinkDistance++;
+ }
}
+ }
}
/*
@@ -261,192 +250,191 @@
*/
void applyLoadHoisting(CompilationUnit* cUnit, LIR* headLIR, LIR* tailLIR)
{
- LIR* thisLIR, *checkLIR;
- /*
- * Store the list of independent instructions that can be hoisted past.
- * Will decide the best place to insert later.
- */
- LIR* prevInstList[MAX_HOIST_DISTANCE];
+ LIR* thisLIR, *checkLIR;
+ /*
+ * Store the list of independent instructions that can be hoisted past.
+ * Will decide the best place to insert later.
+ */
+ LIR* prevInstList[MAX_HOIST_DISTANCE];
- /* Empty block */
- if (headLIR == tailLIR) return;
+ /* Empty block */
+ if (headLIR == tailLIR) return;
- /* Start from the second instruction */
- for (thisLIR = NEXT_LIR(headLIR);
- thisLIR != tailLIR;
- thisLIR = NEXT_LIR(thisLIR)) {
+ /* Start from the second instruction */
+ for (thisLIR = NEXT_LIR(headLIR);
+ thisLIR != tailLIR;
+ thisLIR = NEXT_LIR(thisLIR)) {
- /* Skip non-interesting instructions */
- if ((thisLIR->flags.isNop == true) ||
- isPseudoOpcode(thisLIR->opcode) ||
- !(EncodingMap[thisLIR->opcode].flags & IS_LOAD)) {
- continue;
- }
+ /* Skip non-interesting instructions */
+ if ((thisLIR->flags.isNop == true) ||
+ isPseudoOpcode(thisLIR->opcode) ||
+ !(EncodingMap[thisLIR->opcode].flags & IS_LOAD)) {
+ continue;
+ }
- u8 stopUseAllMask = thisLIR->useMask;
+ u8 stopUseAllMask = thisLIR->useMask;
#if !defined(TARGET_X86)
- /*
- * Branches for null/range checks are marked with the true resource
- * bits, and loads to Dalvik registers, constant pools, and non-alias
- * locations are safe to be hoisted. So only mark the heap references
- * conservatively here.
- */
- if (stopUseAllMask & ENCODE_HEAP_REF) {
- stopUseAllMask |= ENCODE_REG_PC;
- }
+ /*
+ * Branches for null/range checks are marked with the true resource
+ * bits, and loads to Dalvik registers, constant pools, and non-alias
+ * locations are safe to be hoisted. So only mark the heap references
+ * conservatively here.
+ */
+ if (stopUseAllMask & ENCODE_HEAP_REF) {
+ stopUseAllMask |= ENCODE_REG_PC;
+ }
#endif
- /* Similar as above, but just check for pure register dependency */
- u8 stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
- u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
+ /* Similar as above, but just check for pure register dependency */
+ u8 stopUseRegMask = stopUseAllMask & ~ENCODE_MEM;
+ u8 stopDefRegMask = thisLIR->defMask & ~ENCODE_MEM;
- int nextSlot = 0;
- bool stopHere = false;
+ int nextSlot = 0;
+ bool stopHere = false;
- /* Try to hoist the load to a good spot */
- for (checkLIR = PREV_LIR(thisLIR);
- checkLIR != headLIR;
- checkLIR = PREV_LIR(checkLIR)) {
+ /* Try to hoist the load to a good spot */
+ for (checkLIR = PREV_LIR(thisLIR);
+ checkLIR != headLIR;
+ checkLIR = PREV_LIR(checkLIR)) {
- /*
- * Skip already dead instructions (whose dataflow information is
- * outdated and misleading).
- */
- if (checkLIR->flags.isNop) continue;
+ /*
+ * Skip already dead instructions (whose dataflow information is
+ * outdated and misleading).
+ */
+ if (checkLIR->flags.isNop) continue;
- u8 checkMemMask = checkLIR->defMask & ENCODE_MEM;
- u8 aliasCondition = stopUseAllMask & checkMemMask;
- stopHere = false;
+ u8 checkMemMask = checkLIR->defMask & ENCODE_MEM;
+ u8 aliasCondition = stopUseAllMask & checkMemMask;
+ stopHere = false;
- /* Potential WAR alias seen - check the exact relation */
- if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
- /* We can fully disambiguate Dalvik references */
- if (aliasCondition == ENCODE_DALVIK_REG) {
- /* Must alias or partually overlap */
- if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
- isDalvikRegisterClobbered(thisLIR, checkLIR)) {
- stopHere = true;
- }
- /* Conservatively treat all heap refs as may-alias */
- } else {
- DCHECK_EQ(aliasCondition, ENCODE_HEAP_REF);
- stopHere = true;
- }
- /* Memory content may be updated. Stop looking now. */
- if (stopHere) {
- prevInstList[nextSlot++] = checkLIR;
- break;
- }
- }
-
- if (stopHere == false) {
- stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
- checkLIR);
- }
-
- /*
- * Store the dependent or non-pseudo/indepedent instruction to the
- * list.
- */
- if (stopHere || !isPseudoOpcode(checkLIR->opcode)) {
- prevInstList[nextSlot++] = checkLIR;
- if (nextSlot == MAX_HOIST_DISTANCE) break;
- }
-
- /* Found a new place to put the load - move it here */
- if (stopHere == true) {
- DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR
- "HOIST STOP"));
- break;
- }
+ /* Potential WAR alias seen - check the exact relation */
+ if (checkMemMask != ENCODE_MEM && aliasCondition != 0) {
+ /* We can fully disambiguate Dalvik references */
+ if (aliasCondition == ENCODE_DALVIK_REG) {
+          /* Must alias or partially overlap */
+ if ((checkLIR->aliasInfo == thisLIR->aliasInfo) ||
+ isDalvikRegisterClobbered(thisLIR, checkLIR)) {
+ stopHere = true;
+ }
+ /* Conservatively treat all heap refs as may-alias */
+ } else {
+ DCHECK_EQ(aliasCondition, ENCODE_HEAP_REF);
+ stopHere = true;
}
-
- /*
- * Reached the top - use headLIR as the dependent marker as all labels
- * are barriers.
- */
- if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
- prevInstList[nextSlot++] = headLIR;
+ /* Memory content may be updated. Stop looking now. */
+ if (stopHere) {
+ prevInstList[nextSlot++] = checkLIR;
+ break;
}
+ }
- /*
- * At least one independent instruction is found. Scan in the reversed
- * direction to find a beneficial slot.
- */
- if (nextSlot >= 2) {
- int firstSlot = nextSlot - 2;
- int slot;
- LIR* depLIR = prevInstList[nextSlot-1];
- /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
- if (!isPseudoOpcode(depLIR->opcode) &&
- (EncodingMap[depLIR->opcode].flags & IS_LOAD)) {
- firstSlot -= LDLD_DISTANCE;
- }
- /*
- * Make sure we check slot >= 0 since firstSlot may be negative
- * when the loop is first entered.
- */
- for (slot = firstSlot; slot >= 0; slot--) {
- LIR* curLIR = prevInstList[slot];
- LIR* prevLIR = prevInstList[slot+1];
+ if (stopHere == false) {
+ stopHere = CHECK_REG_DEP(stopUseRegMask, stopDefRegMask,
+ checkLIR);
+ }
- /* Check the highest instruction */
- if (prevLIR->defMask == ENCODE_ALL) {
- /*
- * If the first instruction is a load, don't hoist anything
- * above it since it is unlikely to be beneficial.
- */
- if (EncodingMap[curLIR->opcode].flags & IS_LOAD) continue;
- /*
- * If the remaining number of slots is less than LD_LATENCY,
- * insert the hoisted load here.
- */
- if (slot < LD_LATENCY) break;
- }
+ /*
+       * Store the dependent or non-pseudo/independent instruction to the
+ * list.
+ */
+ if (stopHere || !isPseudoOpcode(checkLIR->opcode)) {
+ prevInstList[nextSlot++] = checkLIR;
+ if (nextSlot == MAX_HOIST_DISTANCE) break;
+ }
- /*
- * NOTE: now prevLIR is guaranteed to be a non-pseudo
- * instruction (ie accessing EncodingMap[prevLIR->opcode] is
- * safe).
- *
- * Try to find two instructions with load/use dependency until
- * the remaining instructions are less than LD_LATENCY.
- */
- if (((curLIR->useMask & prevLIR->defMask) &&
- (EncodingMap[prevLIR->opcode].flags & IS_LOAD)) ||
- (slot < LD_LATENCY)) {
- break;
- }
- }
-
- /* Found a slot to hoist to */
- if (slot >= 0) {
- LIR* curLIR = prevInstList[slot];
- LIR* newLoadLIR = (LIR* ) oatNew(cUnit, sizeof(LIR),
- true, kAllocLIR);
- *newLoadLIR = *thisLIR;
- /*
- * Insertion is guaranteed to succeed since checkLIR
- * is never the first LIR on the list
- */
- oatInsertLIRBefore((LIR*) curLIR, (LIR*) newLoadLIR);
- thisLIR->flags.isNop = true;
- }
- }
+ /* Found a new place to put the load - move it here */
+ if (stopHere == true) {
+        DEBUG_OPT(dumpDependentInsnPair(checkLIR, thisLIR, "HOIST STOP"));
+ break;
+ }
}
+
+ /*
+ * Reached the top - use headLIR as the dependent marker as all labels
+ * are barriers.
+ */
+ if (stopHere == false && nextSlot < MAX_HOIST_DISTANCE) {
+ prevInstList[nextSlot++] = headLIR;
+ }
+
+ /*
+ * At least one independent instruction is found. Scan in the reversed
+ * direction to find a beneficial slot.
+ */
+ if (nextSlot >= 2) {
+ int firstSlot = nextSlot - 2;
+ int slot;
+ LIR* depLIR = prevInstList[nextSlot-1];
+ /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
+ if (!isPseudoOpcode(depLIR->opcode) &&
+ (EncodingMap[depLIR->opcode].flags & IS_LOAD)) {
+ firstSlot -= LDLD_DISTANCE;
+ }
+ /*
+ * Make sure we check slot >= 0 since firstSlot may be negative
+ * when the loop is first entered.
+ */
+ for (slot = firstSlot; slot >= 0; slot--) {
+ LIR* curLIR = prevInstList[slot];
+ LIR* prevLIR = prevInstList[slot+1];
+
+ /* Check the highest instruction */
+ if (prevLIR->defMask == ENCODE_ALL) {
+ /*
+ * If the first instruction is a load, don't hoist anything
+ * above it since it is unlikely to be beneficial.
+ */
+ if (EncodingMap[curLIR->opcode].flags & IS_LOAD) continue;
+ /*
+ * If the remaining number of slots is less than LD_LATENCY,
+ * insert the hoisted load here.
+ */
+ if (slot < LD_LATENCY) break;
+ }
+
+ /*
+ * NOTE: now prevLIR is guaranteed to be a non-pseudo
+ * instruction (ie accessing EncodingMap[prevLIR->opcode] is
+ * safe).
+ *
+ * Try to find two instructions with load/use dependency until
+ * the remaining instructions are less than LD_LATENCY.
+ */
+ if (((curLIR->useMask & prevLIR->defMask) &&
+ (EncodingMap[prevLIR->opcode].flags & IS_LOAD)) ||
+ (slot < LD_LATENCY)) {
+ break;
+ }
+ }
+
+ /* Found a slot to hoist to */
+ if (slot >= 0) {
+ LIR* curLIR = prevInstList[slot];
+ LIR* newLoadLIR = (LIR* ) oatNew(cUnit, sizeof(LIR),
+ true, kAllocLIR);
+ *newLoadLIR = *thisLIR;
+ /*
+ * Insertion is guaranteed to succeed since checkLIR
+ * is never the first LIR on the list
+ */
+ oatInsertLIRBefore((LIR*) curLIR, (LIR*) newLoadLIR);
+ thisLIR->flags.isNop = true;
+ }
+ }
+ }
}
void oatApplyLocalOptimizations(CompilationUnit* cUnit, LIR* headLIR,
- LIR* tailLIR)
+ LIR* tailLIR)
{
- if (!(cUnit->disableOpt & (1 << kLoadStoreElimination))) {
- applyLoadStoreElimination(cUnit, (LIR* ) headLIR,
- (LIR* ) tailLIR);
- }
- if (!(cUnit->disableOpt & (1 << kLoadHoisting))) {
- applyLoadHoisting(cUnit, (LIR* ) headLIR, (LIR* ) tailLIR);
- }
+ if (!(cUnit->disableOpt & (1 << kLoadStoreElimination))) {
+ applyLoadStoreElimination(cUnit, (LIR* ) headLIR,
+ (LIR* ) tailLIR);
+ }
+ if (!(cUnit->disableOpt & (1 << kLoadHoisting))) {
+ applyLoadHoisting(cUnit, (LIR* ) headLIR, (LIR* ) tailLIR);
+ }
}
} // namespace art
diff --git a/src/compiler/codegen/MethodCodegenDriver.cc b/src/compiler/codegen/MethodCodegenDriver.cc
index dd47359..8d6bf54 100644
--- a/src/compiler/codegen/MethodCodegenDriver.cc
+++ b/src/compiler/codegen/MethodCodegenDriver.cc
@@ -19,7 +19,7 @@
namespace art {
#define DISPLAY_MISSING_TARGETS (cUnit->enableDebug & \
- (1 << kDebugDisplayMissingTargets))
+ (1 << kDebugDisplayMissingTargets))
const RegLocation badLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0,
INVALID_REG, INVALID_REG, INVALID_SREG};
@@ -27,15 +27,15 @@
/* Mark register usage state and return long retloc */
RegLocation oatGetReturnWide(CompilationUnit* cUnit, bool isDouble)
{
- RegLocation gpr_res = LOC_C_RETURN_WIDE;
- RegLocation fpr_res = LOC_C_RETURN_WIDE_DOUBLE;
- RegLocation res = isDouble ? fpr_res : gpr_res;
- oatClobber(cUnit, res.lowReg);
- oatClobber(cUnit, res.highReg);
- oatLockTemp(cUnit, res.lowReg);
- oatLockTemp(cUnit, res.highReg);
- oatMarkPair(cUnit, res.lowReg, res.highReg);
- return res;
+ RegLocation gpr_res = LOC_C_RETURN_WIDE;
+ RegLocation fpr_res = LOC_C_RETURN_WIDE_DOUBLE;
+ RegLocation res = isDouble ? fpr_res : gpr_res;
+ oatClobber(cUnit, res.lowReg);
+ oatClobber(cUnit, res.highReg);
+ oatLockTemp(cUnit, res.lowReg);
+ oatLockTemp(cUnit, res.highReg);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
}
RegLocation oatGetReturn(CompilationUnit* cUnit, bool isFloat)
@@ -43,120 +43,120 @@
RegLocation gpr_res = LOC_C_RETURN;
RegLocation fpr_res = LOC_C_RETURN_FLOAT;
RegLocation res = isFloat ? fpr_res : gpr_res;
- oatClobber(cUnit, res.lowReg);
- if (cUnit->instructionSet == kMips) {
- oatMarkInUse(cUnit, res.lowReg);
- } else {
- oatLockTemp(cUnit, res.lowReg);
- }
- return res;
+ oatClobber(cUnit, res.lowReg);
+ if (cUnit->instructionSet == kMips) {
+ oatMarkInUse(cUnit, res.lowReg);
+ } else {
+ oatLockTemp(cUnit, res.lowReg);
+ }
+ return res;
}
void genInvoke(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
InvokeType type, bool isRange)
{
- if (genIntrinsic(cUnit, bb, mir, type, isRange)) {
- return;
- }
- DecodedInstruction* dInsn = &mir->dalvikInsn;
- InvokeType originalType = type; // avoiding mutation by ComputeInvokeInfo
- int callState = 0;
- LIR* nullCk;
- LIR** pNullCk = NULL;
- NextCallInsn nextCallInsn;
- oatFlushAllRegs(cUnit); /* Everything to home location */
- // Explicit register usage
- oatLockCallTemps(cUnit);
+ if (genIntrinsic(cUnit, bb, mir, type, isRange)) {
+ return;
+ }
+ DecodedInstruction* dInsn = &mir->dalvikInsn;
+ InvokeType originalType = type; // avoiding mutation by ComputeInvokeInfo
+ int callState = 0;
+ LIR* nullCk;
+ LIR** pNullCk = NULL;
+ NextCallInsn nextCallInsn;
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ // Explicit register usage
+ oatLockCallTemps(cUnit);
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache,
- cUnit->code_item, cUnit->method_idx,
- cUnit->access_flags);
+ OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
+ *cUnit->dex_file, *cUnit->dex_cache,
+ cUnit->code_item, cUnit->method_idx,
+ cUnit->access_flags);
- uint32_t dexMethodIdx = dInsn->vB;
- int vtableIdx;
- uintptr_t directCode;
- uintptr_t directMethod;
- bool skipThis;
- bool fastPath =
- cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, type,
- vtableIdx, directCode,
- directMethod)
- && !SLOW_INVOKE_PATH;
- if (type == kInterface) {
- nextCallInsn = fastPath ? nextInterfaceCallInsn
- : nextInterfaceCallInsnWithAccessCheck;
- skipThis = false;
- } else if (type == kDirect) {
- if (fastPath) {
- pNullCk = &nullCk;
- }
- nextCallInsn = fastPath ? nextSDCallInsn : nextDirectCallInsnSP;
- skipThis = false;
- } else if (type == kStatic) {
- nextCallInsn = fastPath ? nextSDCallInsn : nextStaticCallInsnSP;
- skipThis = false;
- } else if (type == kSuper) {
- DCHECK(!fastPath); // Fast path is a direct call.
- nextCallInsn = nextSuperCallInsnSP;
- skipThis = false;
- } else {
- DCHECK_EQ(type, kVirtual);
- nextCallInsn = fastPath ? nextVCallInsn : nextVCallInsnSP;
- skipThis = fastPath;
+ uint32_t dexMethodIdx = dInsn->vB;
+ int vtableIdx;
+ uintptr_t directCode;
+ uintptr_t directMethod;
+ bool skipThis;
+ bool fastPath =
+ cUnit->compiler->ComputeInvokeInfo(dexMethodIdx, &mUnit, type,
+ vtableIdx, directCode,
+ directMethod)
+ && !SLOW_INVOKE_PATH;
+ if (type == kInterface) {
+ nextCallInsn = fastPath ? nextInterfaceCallInsn
+ : nextInterfaceCallInsnWithAccessCheck;
+ skipThis = false;
+ } else if (type == kDirect) {
+ if (fastPath) {
+ pNullCk = &nullCk;
}
- if (!isRange) {
- callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
- nextCallInsn, dexMethodIdx,
- vtableIdx, directCode, directMethod,
- originalType, skipThis);
- } else {
- callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
- nextCallInsn, dexMethodIdx, vtableIdx,
- directCode, directMethod, originalType,
- skipThis);
- }
- // Finish up any of the call sequence not interleaved in arg loading
- while (callState >= 0) {
- callState = nextCallInsn(cUnit, mir, callState, dexMethodIdx,
- vtableIdx, directCode, directMethod,
- originalType);
- }
- if (DISPLAY_MISSING_TARGETS) {
- genShowTarget(cUnit);
- }
+ nextCallInsn = fastPath ? nextSDCallInsn : nextDirectCallInsnSP;
+ skipThis = false;
+ } else if (type == kStatic) {
+ nextCallInsn = fastPath ? nextSDCallInsn : nextStaticCallInsnSP;
+ skipThis = false;
+ } else if (type == kSuper) {
+ DCHECK(!fastPath); // Fast path is a direct call.
+ nextCallInsn = nextSuperCallInsnSP;
+ skipThis = false;
+ } else {
+ DCHECK_EQ(type, kVirtual);
+ nextCallInsn = fastPath ? nextVCallInsn : nextVCallInsnSP;
+ skipThis = fastPath;
+ }
+ if (!isRange) {
+ callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
+ nextCallInsn, dexMethodIdx,
+ vtableIdx, directCode, directMethod,
+ originalType, skipThis);
+ } else {
+ callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
+ nextCallInsn, dexMethodIdx, vtableIdx,
+ directCode, directMethod, originalType,
+ skipThis);
+ }
+ // Finish up any of the call sequence not interleaved in arg loading
+ while (callState >= 0) {
+ callState = nextCallInsn(cUnit, mir, callState, dexMethodIdx,
+ vtableIdx, directCode, directMethod,
+ originalType);
+ }
+ if (DISPLAY_MISSING_TARGETS) {
+ genShowTarget(cUnit);
+ }
#if !defined(TARGET_X86)
- opReg(cUnit, kOpBlx, rINVOKE_TGT);
+ opReg(cUnit, kOpBlx, rINVOKE_TGT);
#else
- if (fastPath && type != kInterface) {
- opMem(cUnit, kOpBlx, rARG0, Method::GetCodeOffset().Int32Value());
- } else {
- int trampoline = 0;
- switch (type) {
- case kInterface:
- trampoline = fastPath ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
- break;
- case kDirect:
- trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
- break;
- case kStatic:
- trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
- break;
- case kSuper:
- trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
- break;
- case kVirtual:
- trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
- break;
- default:
- LOG(FATAL) << "Unexpected invoke type";
- }
- opThreadMem(cUnit, kOpBlx, trampoline);
+ if (fastPath && type != kInterface) {
+ opMem(cUnit, kOpBlx, rARG0, Method::GetCodeOffset().Int32Value());
+ } else {
+ int trampoline = 0;
+ switch (type) {
+ case kInterface:
+ trampoline = fastPath ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ break;
+ case kDirect:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ break;
+ case kStatic:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ break;
+ case kSuper:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ break;
+ case kVirtual:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke type";
}
+ opThreadMem(cUnit, kOpBlx, trampoline);
+ }
#endif
- oatClobberCalleeSave(cUnit);
+ oatClobberCalleeSave(cUnit);
}
/*
@@ -167,867 +167,856 @@
bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
BasicBlock* bb, LIR* labelList)
{
- bool res = false; // Assume success
- RegLocation rlSrc[3];
- RegLocation rlDest = badLoc;
- RegLocation rlResult = badLoc;
- Instruction::Code opcode = mir->dalvikInsn.opcode;
+ bool res = false; // Assume success
+ RegLocation rlSrc[3];
+ RegLocation rlDest = badLoc;
+ RegLocation rlResult = badLoc;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
- /* Prep Src and Dest locations */
- int nextSreg = 0;
- int nextLoc = 0;
- int attrs = oatDataFlowAttributes[opcode];
- rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
- if (attrs & DF_UA) {
- rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
- nextSreg++;
- } else if (attrs & DF_UA_WIDE) {
- rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
- nextSreg + 1);
- nextSreg+= 2;
- }
- if (attrs & DF_UB) {
- rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
- nextSreg++;
- } else if (attrs & DF_UB_WIDE) {
- rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
- nextSreg + 1);
- nextSreg+= 2;
- }
- if (attrs & DF_UC) {
- rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
- } else if (attrs & DF_UC_WIDE) {
- rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
- nextSreg + 1);
- }
- if (attrs & DF_DA) {
- rlDest = oatGetDest(cUnit, mir, 0);
- } else if (attrs & DF_DA_WIDE) {
- rlDest = oatGetDestWide(cUnit, mir, 0, 1);
- }
+ /* Prep Src and Dest locations */
+ int nextSreg = 0;
+ int nextLoc = 0;
+ int attrs = oatDataFlowAttributes[opcode];
+ rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
+ if (attrs & DF_UA) {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ nextSreg++;
+ } else if (attrs & DF_UA_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ nextSreg+= 2;
+ }
+ if (attrs & DF_UB) {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ nextSreg++;
+ } else if (attrs & DF_UB_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ nextSreg+= 2;
+ }
+ if (attrs & DF_UC) {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ } else if (attrs & DF_UC_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ }
+ if (attrs & DF_DA) {
+ rlDest = oatGetDest(cUnit, mir, 0);
+ } else if (attrs & DF_DA_WIDE) {
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ }
- switch (opcode) {
- case Instruction::NOP:
- break;
+ switch (opcode) {
+ case Instruction::NOP:
+ break;
- case Instruction::MOVE_EXCEPTION: {
- int exOffset = Thread::ExceptionOffset().Int32Value();
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ case Instruction::MOVE_EXCEPTION: {
+ int exOffset = Thread::ExceptionOffset().Int32Value();
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
#if defined(TARGET_X86)
- newLIR2(cUnit, kX86Mov32RT, rlResult.lowReg, exOffset);
- newLIR2(cUnit, kX86Mov32TI, exOffset, 0);
+ newLIR2(cUnit, kX86Mov32RT, rlResult.lowReg, exOffset);
+ newLIR2(cUnit, kX86Mov32TI, exOffset, 0);
#else
- int resetReg = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
- loadConstant(cUnit, resetReg, 0);
- storeWordDisp(cUnit, rSELF, exOffset, resetReg);
- storeValue(cUnit, rlDest, rlResult);
- oatFreeTemp(cUnit, resetReg);
+ int resetReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
+ loadConstant(cUnit, resetReg, 0);
+ storeWordDisp(cUnit, rSELF, exOffset, resetReg);
+ storeValue(cUnit, rlDest, rlResult);
+ oatFreeTemp(cUnit, resetReg);
#endif
- break;
- }
- case Instruction::RETURN_VOID:
- if (!cUnit->attrs & METHOD_IS_LEAF) {
- genSuspendTest(cUnit, mir);
- }
- break;
-
- case Instruction::RETURN:
- case Instruction::RETURN_OBJECT:
- if (!cUnit->attrs & METHOD_IS_LEAF) {
- genSuspendTest(cUnit, mir);
- }
- storeValue(cUnit, oatGetReturn(cUnit, cUnit->shorty[0] == 'F'),
- rlSrc[0]);
- break;
-
- case Instruction::RETURN_WIDE:
- if (!cUnit->attrs & METHOD_IS_LEAF) {
- genSuspendTest(cUnit, mir);
- }
- storeValueWide(cUnit, oatGetReturnWide(cUnit,
- cUnit->shorty[0] == 'D'), rlSrc[0]);
- break;
-
- case Instruction::MOVE_RESULT_WIDE:
- if (mir->optimizationFlags & MIR_INLINED)
- break; // Nop - combined w/ previous invoke
- storeValueWide(cUnit, rlDest, oatGetReturnWide(cUnit, rlDest.fp));
- break;
-
- case Instruction::MOVE_RESULT:
- case Instruction::MOVE_RESULT_OBJECT:
- if (mir->optimizationFlags & MIR_INLINED)
- break; // Nop - combined w/ previous invoke
- storeValue(cUnit, rlDest, oatGetReturn(cUnit, rlDest.fp));
- break;
-
- case Instruction::MOVE:
- case Instruction::MOVE_OBJECT:
- case Instruction::MOVE_16:
- case Instruction::MOVE_OBJECT_16:
- case Instruction::MOVE_FROM16:
- case Instruction::MOVE_OBJECT_FROM16:
- storeValue(cUnit, rlDest, rlSrc[0]);
- break;
-
- case Instruction::MOVE_WIDE:
- case Instruction::MOVE_WIDE_16:
- case Instruction::MOVE_WIDE_FROM16:
- storeValueWide(cUnit, rlDest, rlSrc[0]);
- break;
-
- case Instruction::CONST:
- case Instruction::CONST_4:
- case Instruction::CONST_16:
- rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
- storeValue(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::CONST_HIGH16:
- rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- loadConstantNoClobber(cUnit, rlResult.lowReg,
- mir->dalvikInsn.vB << 16);
- storeValue(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32:
- rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
- mir->dalvikInsn.vB,
- (mir->dalvikInsn.vB & 0x80000000) ? -1 : 0);
- storeValueWide(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::CONST_WIDE:
- rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
- mir->dalvikInsn.vB_wide & 0xffffffff,
- (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
- storeValueWide(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::CONST_WIDE_HIGH16:
- rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
- 0, mir->dalvikInsn.vB << 16);
- storeValueWide(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::MONITOR_ENTER:
- genMonitorEnter(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::MONITOR_EXIT:
- genMonitorExit(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::CHECK_CAST:
- genCheckCast(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::INSTANCE_OF:
- genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::NEW_INSTANCE:
- genNewInstance(cUnit, mir, rlDest);
- break;
-
- case Instruction::THROW:
- genThrow(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::THROW_VERIFICATION_ERROR:
- genThrowVerificationError(cUnit, mir);
- break;
-
- case Instruction::ARRAY_LENGTH:
- int lenOffset;
- lenOffset = Array::LengthOffset().Int32Value();
- rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
- genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, mir);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
- rlResult.lowReg);
- storeValue(cUnit, rlDest, rlResult);
- break;
-
- case Instruction::CONST_STRING:
- case Instruction::CONST_STRING_JUMBO:
- genConstString(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::CONST_CLASS:
- genConstClass(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::FILL_ARRAY_DATA:
- genFillArrayData(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::FILLED_NEW_ARRAY:
- genFilledNewArray(cUnit, mir, false /* not range */);
- break;
-
- case Instruction::FILLED_NEW_ARRAY_RANGE:
- genFilledNewArray(cUnit, mir, true /* range */);
- break;
-
- case Instruction::NEW_ARRAY:
- genNewArray(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::GOTO:
- case Instruction::GOTO_16:
- case Instruction::GOTO_32:
- if (bb->taken->startOffset <= mir->offset) {
- genSuspendTestAndBranch(cUnit, mir, &labelList[bb->taken->id]);
- } else {
- opUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
- }
- break;
-
- case Instruction::PACKED_SWITCH:
- genPackedSwitch(cUnit, mir, rlSrc[0]);
- break;
-
- case Instruction::SPARSE_SWITCH:
- genSparseSwitch(cUnit, mir, rlSrc[0], labelList);
- break;
-
- case Instruction::CMPL_FLOAT:
- case Instruction::CMPG_FLOAT:
- case Instruction::CMPL_DOUBLE:
- case Instruction::CMPG_DOUBLE:
- res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::CMP_LONG:
- genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::IF_EQ:
- case Instruction::IF_NE:
- case Instruction::IF_LT:
- case Instruction::IF_GE:
- case Instruction::IF_GT:
- case Instruction::IF_LE: {
- bool backwardBranch;
- backwardBranch = (bb->taken->startOffset <= mir->offset);
- if (backwardBranch) {
- genSuspendTest(cUnit, mir);
- }
- genCompareAndBranch(cUnit, bb, mir, rlSrc[0], rlSrc[1], labelList);
- break;
- }
-
- case Instruction::IF_EQZ:
- case Instruction::IF_NEZ:
- case Instruction::IF_LTZ:
- case Instruction::IF_GEZ:
- case Instruction::IF_GTZ:
- case Instruction::IF_LEZ: {
- bool backwardBranch;
- backwardBranch = (bb->taken->startOffset <= mir->offset);
- if (backwardBranch) {
- genSuspendTest(cUnit, mir);
- }
- genCompareZeroAndBranch(cUnit, bb, mir, rlSrc[0], labelList);
- break;
- }
-
- case Instruction::AGET_WIDE:
- genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
- break;
- case Instruction::AGET:
- case Instruction::AGET_OBJECT:
- genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
- break;
- case Instruction::AGET_BOOLEAN:
- genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
- rlDest, 0);
- break;
- case Instruction::AGET_BYTE:
- genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
- break;
- case Instruction::AGET_CHAR:
- genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
- rlDest, 1);
- break;
- case Instruction::AGET_SHORT:
- genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
- break;
- case Instruction::APUT_WIDE:
- genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
- break;
- case Instruction::APUT:
- genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
- break;
- case Instruction::APUT_OBJECT:
- genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
- break;
- case Instruction::APUT_SHORT:
- case Instruction::APUT_CHAR:
- genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
- rlSrc[0], 1);
- break;
- case Instruction::APUT_BYTE:
- case Instruction::APUT_BOOLEAN:
- genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
- rlSrc[0], 0);
- break;
-
- case Instruction::IGET_OBJECT:
- //case Instruction::IGET_OBJECT_VOLATILE:
- genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, true);
- break;
-
- case Instruction::IGET_WIDE:
- //case Instruction::IGET_WIDE_VOLATILE:
- genIGet(cUnit, mir, kLong, rlDest, rlSrc[0], true, false);
- break;
-
- case Instruction::IGET:
- //case Instruction::IGET_VOLATILE:
- genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, false);
- break;
-
- case Instruction::IGET_CHAR:
- genIGet(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0], false, false);
- break;
-
- case Instruction::IGET_SHORT:
- genIGet(cUnit, mir, kSignedHalf, rlDest, rlSrc[0], false, false);
- break;
-
- case Instruction::IGET_BOOLEAN:
- case Instruction::IGET_BYTE:
- genIGet(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0], false, false);
- break;
-
- case Instruction::IPUT_WIDE:
- //case Instruction::IPUT_WIDE_VOLATILE:
- genIPut(cUnit, mir, kLong, rlSrc[0], rlSrc[1], true, false);
- break;
-
- case Instruction::IPUT_OBJECT:
- //case Instruction::IPUT_OBJECT_VOLATILE:
- genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, true);
- break;
-
- case Instruction::IPUT:
- //case Instruction::IPUT_VOLATILE:
- genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, false);
- break;
-
- case Instruction::IPUT_BOOLEAN:
- case Instruction::IPUT_BYTE:
- genIPut(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false, false);
- break;
-
- case Instruction::IPUT_CHAR:
- genIPut(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false, false);
- break;
-
- case Instruction::IPUT_SHORT:
- genIPut(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false, false);
- break;
-
- case Instruction::SGET_OBJECT:
- genSget(cUnit, mir, rlDest, false, true);
- break;
- case Instruction::SGET:
- case Instruction::SGET_BOOLEAN:
- case Instruction::SGET_BYTE:
- case Instruction::SGET_CHAR:
- case Instruction::SGET_SHORT:
- genSget(cUnit, mir, rlDest, false, false);
- break;
-
- case Instruction::SGET_WIDE:
- genSget(cUnit, mir, rlDest, true, false);
- break;
-
- case Instruction::SPUT_OBJECT:
- genSput(cUnit, mir, rlSrc[0], false, true);
- break;
-
- case Instruction::SPUT:
- case Instruction::SPUT_BOOLEAN:
- case Instruction::SPUT_BYTE:
- case Instruction::SPUT_CHAR:
- case Instruction::SPUT_SHORT:
- genSput(cUnit, mir, rlSrc[0], false, false);
- break;
-
- case Instruction::SPUT_WIDE:
- genSput(cUnit, mir, rlSrc[0], true, false);
- break;
-
- case Instruction::INVOKE_STATIC_RANGE:
- genInvoke(cUnit, bb, mir, kStatic, true /*range*/);
- break;
- case Instruction::INVOKE_STATIC:
- genInvoke(cUnit, bb, mir, kStatic, false /*range*/);
- break;
-
- case Instruction::INVOKE_DIRECT:
- genInvoke(cUnit, bb, mir, kDirect, false /*range*/);
- break;
- case Instruction::INVOKE_DIRECT_RANGE:
- genInvoke(cUnit, bb, mir, kDirect, true /*range*/);
- break;
-
- case Instruction::INVOKE_VIRTUAL:
- genInvoke(cUnit, bb, mir, kVirtual, false /*range*/);
- break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
- genInvoke(cUnit, bb, mir, kVirtual, true /*range*/);
- break;
-
- case Instruction::INVOKE_SUPER:
- genInvoke(cUnit, bb, mir, kSuper, false /*range*/);
- break;
- case Instruction::INVOKE_SUPER_RANGE:
- genInvoke(cUnit, bb, mir, kSuper, true /*range*/);
- break;
-
- case Instruction::INVOKE_INTERFACE:
- genInvoke(cUnit, bb, mir, kInterface, false /*range*/);
- break;
- case Instruction::INVOKE_INTERFACE_RANGE:
- genInvoke(cUnit, bb, mir, kInterface, true /*range*/);
- break;
-
- case Instruction::NEG_INT:
- case Instruction::NOT_INT:
- res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
- break;
-
- case Instruction::NEG_LONG:
- case Instruction::NOT_LONG:
- res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
- break;
-
- case Instruction::NEG_FLOAT:
- res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
- break;
-
- case Instruction::NEG_DOUBLE:
- res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
- break;
-
- case Instruction::INT_TO_LONG:
- genIntToLong(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::LONG_TO_INT:
- rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
- rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
- storeValue(cUnit, rlDest, rlSrc[0]);
- break;
-
- case Instruction::INT_TO_BYTE:
- case Instruction::INT_TO_SHORT:
- case Instruction::INT_TO_CHAR:
- genIntNarrowing(cUnit, mir, rlDest, rlSrc[0]);
- break;
-
- case Instruction::INT_TO_FLOAT:
- case Instruction::INT_TO_DOUBLE:
- case Instruction::LONG_TO_FLOAT:
- case Instruction::LONG_TO_DOUBLE:
- case Instruction::FLOAT_TO_INT:
- case Instruction::FLOAT_TO_LONG:
- case Instruction::FLOAT_TO_DOUBLE:
- case Instruction::DOUBLE_TO_INT:
- case Instruction::DOUBLE_TO_LONG:
- case Instruction::DOUBLE_TO_FLOAT:
- genConversion(cUnit, mir);
- break;
-
- case Instruction::ADD_INT:
- case Instruction::SUB_INT:
- case Instruction::MUL_INT:
- case Instruction::DIV_INT:
- case Instruction::REM_INT:
- case Instruction::AND_INT:
- case Instruction::OR_INT:
- case Instruction::XOR_INT:
- case Instruction::SHL_INT:
- case Instruction::SHR_INT:
- case Instruction::USHR_INT:
- case Instruction::ADD_INT_2ADDR:
- case Instruction::SUB_INT_2ADDR:
- case Instruction::MUL_INT_2ADDR:
- case Instruction::DIV_INT_2ADDR:
- case Instruction::REM_INT_2ADDR:
- case Instruction::AND_INT_2ADDR:
- case Instruction::OR_INT_2ADDR:
- case Instruction::XOR_INT_2ADDR:
- case Instruction::SHL_INT_2ADDR:
- case Instruction::SHR_INT_2ADDR:
- case Instruction::USHR_INT_2ADDR:
- genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::ADD_LONG:
- case Instruction::SUB_LONG:
- case Instruction::MUL_LONG:
- case Instruction::DIV_LONG:
- case Instruction::REM_LONG:
- case Instruction::AND_LONG:
- case Instruction::OR_LONG:
- case Instruction::XOR_LONG:
- case Instruction::ADD_LONG_2ADDR:
- case Instruction::SUB_LONG_2ADDR:
- case Instruction::MUL_LONG_2ADDR:
- case Instruction::DIV_LONG_2ADDR:
- case Instruction::REM_LONG_2ADDR:
- case Instruction::AND_LONG_2ADDR:
- case Instruction::OR_LONG_2ADDR:
- case Instruction::XOR_LONG_2ADDR:
- genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::SHL_LONG:
- case Instruction::SHR_LONG:
- case Instruction::USHR_LONG:
- case Instruction::SHL_LONG_2ADDR:
- case Instruction::SHR_LONG_2ADDR:
- case Instruction::USHR_LONG_2ADDR:
- genShiftOpLong(cUnit,mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::ADD_FLOAT:
- case Instruction::SUB_FLOAT:
- case Instruction::MUL_FLOAT:
- case Instruction::DIV_FLOAT:
- case Instruction::REM_FLOAT:
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::REM_FLOAT_2ADDR:
- genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::ADD_DOUBLE:
- case Instruction::SUB_DOUBLE:
- case Instruction::MUL_DOUBLE:
- case Instruction::DIV_DOUBLE:
- case Instruction::REM_DOUBLE:
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE_2ADDR:
- genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
- break;
-
- case Instruction::RSUB_INT:
- case Instruction::ADD_INT_LIT16:
- case Instruction::MUL_INT_LIT16:
- case Instruction::DIV_INT_LIT16:
- case Instruction::REM_INT_LIT16:
- case Instruction::AND_INT_LIT16:
- case Instruction::OR_INT_LIT16:
- case Instruction::XOR_INT_LIT16:
- case Instruction::ADD_INT_LIT8:
- case Instruction::RSUB_INT_LIT8:
- case Instruction::MUL_INT_LIT8:
- case Instruction::DIV_INT_LIT8:
- case Instruction::REM_INT_LIT8:
- case Instruction::AND_INT_LIT8:
- case Instruction::OR_INT_LIT8:
- case Instruction::XOR_INT_LIT8:
- case Instruction::SHL_INT_LIT8:
- case Instruction::SHR_INT_LIT8:
- case Instruction::USHR_INT_LIT8:
- genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
- break;
-
- default:
- res = true;
+ break;
}
- return res;
+ case Instruction::RETURN_VOID:
+      if (!(cUnit->attrs & METHOD_IS_LEAF)) {
+ genSuspendTest(cUnit, mir);
+ }
+ break;
+
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+      if (!(cUnit->attrs & METHOD_IS_LEAF)) {
+ genSuspendTest(cUnit, mir);
+ }
+ storeValue(cUnit, oatGetReturn(cUnit, cUnit->shorty[0] == 'F'), rlSrc[0]);
+ break;
+
+ case Instruction::RETURN_WIDE:
+      if (!(cUnit->attrs & METHOD_IS_LEAF)) {
+ genSuspendTest(cUnit, mir);
+ }
+ storeValueWide(cUnit, oatGetReturnWide(cUnit,
+ cUnit->shorty[0] == 'D'), rlSrc[0]);
+ break;
+
+ case Instruction::MOVE_RESULT_WIDE:
+ if (mir->optimizationFlags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke
+ storeValueWide(cUnit, rlDest, oatGetReturnWide(cUnit, rlDest.fp));
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ if (mir->optimizationFlags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke
+ storeValue(cUnit, rlDest, oatGetReturn(cUnit, rlDest.fp));
+ break;
+
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ storeValue(cUnit, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_WIDE_FROM16:
+ storeValueWide(cUnit, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
+ storeValue(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_HIGH16:
+ rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB << 16);
+ storeValue(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ mir->dalvikInsn.vB,
+ (mir->dalvikInsn.vB & 0x80000000) ? -1 : 0);
+ storeValueWide(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_WIDE:
+ rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ mir->dalvikInsn.vB_wide & 0xffffffff,
+ (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
+ storeValueWide(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_WIDE_HIGH16:
+ rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+ loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+ 0, mir->dalvikInsn.vB << 16);
+ storeValueWide(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::MONITOR_ENTER:
+ genMonitorEnter(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::MONITOR_EXIT:
+ genMonitorExit(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::CHECK_CAST:
+ genCheckCast(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::INSTANCE_OF:
+ genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::NEW_INSTANCE:
+ genNewInstance(cUnit, mir, rlDest);
+ break;
+
+ case Instruction::THROW:
+ genThrow(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::THROW_VERIFICATION_ERROR:
+ genThrowVerificationError(cUnit, mir);
+ break;
+
+ case Instruction::ARRAY_LENGTH:
+ int lenOffset;
+ lenOffset = Array::LengthOffset().Int32Value();
+ rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
+ genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, mir);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset, rlResult.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ genConstString(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::CONST_CLASS:
+ genConstClass(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::FILL_ARRAY_DATA:
+ genFillArrayData(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY:
+ genFilledNewArray(cUnit, mir, false /* not range */);
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ genFilledNewArray(cUnit, mir, true /* range */);
+ break;
+
+ case Instruction::NEW_ARRAY:
+ genNewArray(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ if (bb->taken->startOffset <= mir->offset) {
+ genSuspendTestAndBranch(cUnit, mir, &labelList[bb->taken->id]);
+ } else {
+ opUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
+ }
+ break;
+
+ case Instruction::PACKED_SWITCH:
+ genPackedSwitch(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::SPARSE_SWITCH:
+ genSparseSwitch(cUnit, mir, rlSrc[0], labelList);
+ break;
+
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::CMP_LONG:
+ genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE: {
+ bool backwardBranch;
+ backwardBranch = (bb->taken->startOffset <= mir->offset);
+ if (backwardBranch) {
+ genSuspendTest(cUnit, mir);
+ }
+ genCompareAndBranch(cUnit, bb, mir, rlSrc[0], rlSrc[1], labelList);
+ break;
+ }
+
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ: {
+ bool backwardBranch;
+ backwardBranch = (bb->taken->startOffset <= mir->offset);
+ if (backwardBranch) {
+ genSuspendTest(cUnit, mir);
+ }
+ genCompareZeroAndBranch(cUnit, bb, mir, rlSrc[0], labelList);
+ break;
+ }
+
+ case Instruction::AGET_WIDE:
+ genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
+ break;
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
+ break;
+ case Instruction::AGET_BOOLEAN:
+ genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ break;
+ case Instruction::AGET_BYTE:
+ genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ break;
+ case Instruction::AGET_CHAR:
+ genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ break;
+ case Instruction::AGET_SHORT:
+ genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ break;
+ case Instruction::APUT_WIDE:
+ genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
+ break;
+ case Instruction::APUT:
+ genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ break;
+ case Instruction::APUT_OBJECT:
+ genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ break;
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2], rlSrc[0], 1);
+ break;
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
+ rlSrc[0], 0);
+ break;
+
+ case Instruction::IGET_OBJECT:
+ //case Instruction::IGET_OBJECT_VOLATILE:
+ genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, true);
+ break;
+
+ case Instruction::IGET_WIDE:
+ //case Instruction::IGET_WIDE_VOLATILE:
+ genIGet(cUnit, mir, kLong, rlDest, rlSrc[0], true, false);
+ break;
+
+ case Instruction::IGET:
+ //case Instruction::IGET_VOLATILE:
+ genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_CHAR:
+ genIGet(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_SHORT:
+ genIGet(cUnit, mir, kSignedHalf, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ genIGet(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IPUT_WIDE:
+ //case Instruction::IPUT_WIDE_VOLATILE:
+ genIPut(cUnit, mir, kLong, rlSrc[0], rlSrc[1], true, false);
+ break;
+
+ case Instruction::IPUT_OBJECT:
+ //case Instruction::IPUT_OBJECT_VOLATILE:
+ genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, true);
+ break;
+
+ case Instruction::IPUT:
+ //case Instruction::IPUT_VOLATILE:
+ genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ genIPut(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_CHAR:
+ genIPut(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_SHORT:
+ genIPut(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::SGET_OBJECT:
+ genSget(cUnit, mir, rlDest, false, true);
+ break;
+ case Instruction::SGET:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ genSget(cUnit, mir, rlDest, false, false);
+ break;
+
+ case Instruction::SGET_WIDE:
+ genSget(cUnit, mir, rlDest, true, false);
+ break;
+
+ case Instruction::SPUT_OBJECT:
+ genSput(cUnit, mir, rlSrc[0], false, true);
+ break;
+
+ case Instruction::SPUT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ genSput(cUnit, mir, rlSrc[0], false, false);
+ break;
+
+ case Instruction::SPUT_WIDE:
+ genSput(cUnit, mir, rlSrc[0], true, false);
+ break;
+
+ case Instruction::INVOKE_STATIC_RANGE:
+ genInvoke(cUnit, bb, mir, kStatic, true /*range*/);
+ break;
+ case Instruction::INVOKE_STATIC:
+ genInvoke(cUnit, bb, mir, kStatic, false /*range*/);
+ break;
+
+ case Instruction::INVOKE_DIRECT:
+ genInvoke(cUnit, bb, mir, kDirect, false /*range*/);
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ genInvoke(cUnit, bb, mir, kDirect, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ genInvoke(cUnit, bb, mir, kVirtual, false /*range*/);
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ genInvoke(cUnit, bb, mir, kVirtual, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_SUPER:
+ genInvoke(cUnit, bb, mir, kSuper, false /*range*/);
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ genInvoke(cUnit, bb, mir, kSuper, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_INTERFACE:
+ genInvoke(cUnit, bb, mir, kInterface, false /*range*/);
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ genInvoke(cUnit, bb, mir, kInterface, true /*range*/);
+ break;
+
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_FLOAT:
+ res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_DOUBLE:
+ res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_LONG:
+ genIntToLong(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::LONG_TO_INT:
+ rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
+ rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
+ storeValue(cUnit, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::INT_TO_CHAR:
+ genIntNarrowing(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_INT:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::FLOAT_TO_DOUBLE:
+ case Instruction::DOUBLE_TO_INT:
+ case Instruction::DOUBLE_TO_LONG:
+ case Instruction::DOUBLE_TO_FLOAT:
+ genConversion(cUnit, mir);
+ break;
+
+ case Instruction::ADD_INT:
+ case Instruction::SUB_INT:
+ case Instruction::MUL_INT:
+ case Instruction::DIV_INT:
+ case Instruction::REM_INT:
+ case Instruction::AND_INT:
+ case Instruction::OR_INT:
+ case Instruction::XOR_INT:
+ case Instruction::SHL_INT:
+ case Instruction::SHR_INT:
+ case Instruction::USHR_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT_2ADDR:
+ genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::MUL_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR:
+      genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::ADD_FLOAT:
+ case Instruction::SUB_FLOAT:
+ case Instruction::MUL_FLOAT:
+ case Instruction::DIV_FLOAT:
+ case Instruction::REM_FLOAT:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT_2ADDR:
+ genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::ADD_DOUBLE:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::REM_DOUBLE:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE_2ADDR:
+ genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::RSUB_INT:
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8:
+ genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+
+ default:
+ res = true;
+ }
+ return res;
}
const char* extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
- "kMirOpPhi",
- "kMirOpCopy",
- "kMirFusedCmplFloat",
- "kMirFusedCmpgFloat",
- "kMirFusedCmplDouble",
- "kMirFusedCmpgDouble",
- "kMirFusedCmpLong",
- "kMirNop",
- "kMirOpNullNRangeUpCheck",
- "kMirOpNullNRangeDownCheck",
- "kMirOpLowerBound",
+ "kMirOpPhi",
+ "kMirOpCopy",
+ "kMirFusedCmplFloat",
+ "kMirFusedCmpgFloat",
+ "kMirFusedCmplDouble",
+ "kMirFusedCmpgDouble",
+ "kMirFusedCmpLong",
+ "kMirNop",
+ "kMirOpNullNRangeUpCheck",
+ "kMirOpNullNRangeDownCheck",
+ "kMirOpLowerBound",
};
/* Extended MIR instructions like PHI */
void handleExtendedMethodMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
{
- int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
- char* msg = NULL;
- if (cUnit->printMe) {
- msg = (char*)oatNew(cUnit, strlen(extendedMIROpNames[opOffset]) + 1,
- false, kAllocDebugInfo);
- strcpy(msg, extendedMIROpNames[opOffset]);
- }
- LIR* op = newLIR1(cUnit, kPseudoExtended, (int) msg);
+ int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
+ char* msg = NULL;
+ if (cUnit->printMe) {
+ msg = (char*)oatNew(cUnit, strlen(extendedMIROpNames[opOffset]) + 1,
+ false, kAllocDebugInfo);
+ strcpy(msg, extendedMIROpNames[opOffset]);
+ }
+ LIR* op = newLIR1(cUnit, kPseudoExtended, (int) msg);
- switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
- case kMirOpPhi: {
- char* ssaString = NULL;
- if (cUnit->printMe) {
- ssaString = oatGetSSAString(cUnit, mir->ssaRep);
- }
- op->flags.isNop = true;
- newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
- break;
- }
- case kMirOpCopy: {
- RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
- RegLocation rlDest = oatGetDest(cUnit, mir, 0);
- storeValue(cUnit, rlDest, rlSrc);
- break;
- }
-#if defined(TARGET_ARM)
- case kMirOpFusedCmplFloat:
- genFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, false /*double*/);
- break;
- case kMirOpFusedCmpgFloat:
- genFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, false /*double*/);
- break;
- case kMirOpFusedCmplDouble:
- genFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, true /*double*/);
- break;
- case kMirOpFusedCmpgDouble:
- genFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, true /*double*/);
- break;
- case kMirOpFusedCmpLong:
- genFusedLongCmpBranch(cUnit, bb, mir);
- break;
-#endif
- default:
- break;
+ switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
+ case kMirOpPhi: {
+ char* ssaString = NULL;
+ if (cUnit->printMe) {
+ ssaString = oatGetSSAString(cUnit, mir->ssaRep);
+ }
+ op->flags.isNop = true;
+ newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
+ break;
}
+ case kMirOpCopy: {
+ RegLocation rlSrc = oatGetSrc(cUnit, mir, 0);
+ RegLocation rlDest = oatGetDest(cUnit, mir, 0);
+ storeValue(cUnit, rlDest, rlSrc);
+ break;
+ }
+#if defined(TARGET_ARM)
+ case kMirOpFusedCmplFloat:
+ genFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, false /*double*/);
+ break;
+ case kMirOpFusedCmpgFloat:
+ genFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, false /*double*/);
+ break;
+ case kMirOpFusedCmplDouble:
+ genFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, true /*double*/);
+ break;
+ case kMirOpFusedCmpgDouble:
+ genFusedFPCmpBranch(cUnit, bb, mir, true /*gt bias*/, true /*double*/);
+ break;
+ case kMirOpFusedCmpLong:
+ genFusedLongCmpBranch(cUnit, bb, mir);
+ break;
+#endif
+ default:
+ break;
+ }
}
/* Handle the content in each basic block */
bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
{
- MIR* mir;
- LIR* labelList = (LIR*) cUnit->blockLabelList;
- int blockId = bb->id;
+ MIR* mir;
+ LIR* labelList = (LIR*) cUnit->blockLabelList;
+ int blockId = bb->id;
- cUnit->curBlock = bb;
- labelList[blockId].operands[0] = bb->startOffset;
+ cUnit->curBlock = bb;
+ labelList[blockId].operands[0] = bb->startOffset;
- /* Insert the block label */
- labelList[blockId].opcode = kPseudoNormalBlockLabel;
- oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
+ /* Insert the block label */
+ labelList[blockId].opcode = kPseudoNormalBlockLabel;
+ oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
- /* Free temp registers and reset redundant store tracking */
+ /* Free temp registers and reset redundant store tracking */
+ oatResetRegPool(cUnit);
+ oatResetDefTracking(cUnit);
+
+ /*
+ * If control reached us from our immediate predecessor via
+ * fallthrough and we have no other incoming arcs we can
+ * reuse existing liveness. Otherwise, reset.
+ */
+ if (!bb->fallThroughTarget || bb->predecessors->numUsed != 1) {
+ oatClobberAllRegs(cUnit);
+ }
+
+ LIR* headLIR = NULL;
+
+ if (bb->blockType == kEntryBlock) {
+ genEntrySequence(cUnit, bb);
+ } else if (bb->blockType == kExitBlock) {
+ genExitSequence(cUnit, bb);
+ }
+
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
+
oatResetRegPool(cUnit);
- oatResetDefTracking(cUnit);
-
- /*
- * If control reached us from our immediate predecessor via
- * fallthrough and we have no other incoming arcs we can
- * reuse existing liveness. Otherwise, reset.
- */
- if (!bb->fallThroughTarget || bb->predecessors->numUsed != 1) {
- oatClobberAllRegs(cUnit);
+ if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
+ oatClobberAllRegs(cUnit);
}
- LIR* headLIR = NULL;
-
- if (bb->blockType == kEntryBlock) {
- genEntrySequence(cUnit, bb);
- } else if (bb->blockType == kExitBlock) {
- genExitSequence(cUnit, bb);
+ if (cUnit->disableOpt & (1 << kSuppressLoads)) {
+ oatResetDefTracking(cUnit);
}
- for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
-
- oatResetRegPool(cUnit);
- if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
- oatClobberAllRegs(cUnit);
- }
-
- if (cUnit->disableOpt & (1 << kSuppressLoads)) {
- oatResetDefTracking(cUnit);
- }
-
#ifndef NDEBUG
- /* Reset temp tracking sanity check */
- cUnit->liveSReg = INVALID_SREG;
+ /* Reset temp tracking sanity check */
+ cUnit->liveSReg = INVALID_SREG;
#endif
- cUnit->currentDalvikOffset = mir->offset;
+ cUnit->currentDalvikOffset = mir->offset;
- Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
- Instruction::Format dalvikFormat = Instruction::FormatOf(dalvikOpcode);
+ Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
+ Instruction::Format dalvikFormat = Instruction::FormatOf(dalvikOpcode);
- LIR* boundaryLIR;
+ LIR* boundaryLIR;
- /* Mark the beginning of a Dalvik instruction for line tracking */
- char* instStr = cUnit->printMe ?
- oatGetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
- boundaryLIR = newLIR1(cUnit, kPseudoDalvikByteCodeBoundary,
- (intptr_t) instStr);
- cUnit->boundaryMap.Overwrite(mir->offset, boundaryLIR);
- /* Remember the first LIR for this block */
- if (headLIR == NULL) {
- headLIR = boundaryLIR;
- /* Set the first boundaryLIR as a scheduling barrier */
- headLIR->defMask = ENCODE_ALL;
- }
-
- /* If we're compiling for the debugger, generate an update callout */
- if (cUnit->genDebugger) {
- genDebuggerUpdate(cUnit, mir->offset);
- }
-
- /* Don't generate the SSA annotation unless verbose mode is on */
- if (cUnit->printMe && mir->ssaRep) {
- char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
- newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
- }
-
- if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
- handleExtendedMethodMIR(cUnit, bb, mir);
- continue;
- }
-
- bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
- if (notHandled) {
- LOG(FATAL) << StringPrintf("%#06x: Opcode %#x (%s) / Fmt %d not handled",
- mir->offset, dalvikOpcode, Instruction::Name(dalvikOpcode), dalvikFormat);
-
- }
+ /* Mark the beginning of a Dalvik instruction for line tracking */
+ char* instStr = cUnit->printMe ?
+ oatGetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
+ boundaryLIR = newLIR1(cUnit, kPseudoDalvikByteCodeBoundary,
+ (intptr_t) instStr);
+ cUnit->boundaryMap.Overwrite(mir->offset, boundaryLIR);
+ /* Remember the first LIR for this block */
+ if (headLIR == NULL) {
+ headLIR = boundaryLIR;
+ /* Set the first boundaryLIR as a scheduling barrier */
+ headLIR->defMask = ENCODE_ALL;
}
- if (headLIR) {
- /*
- * Eliminate redundant loads/stores and delay stores into later
- * slots
- */
- oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
- cUnit->lastLIRInsn);
-
- /*
- * Generate an unconditional branch to the fallthrough block.
- */
- if (bb->fallThrough) {
- opUnconditionalBranch(cUnit,
- &labelList[bb->fallThrough->id]);
- }
+ /* If we're compiling for the debugger, generate an update callout */
+ if (cUnit->genDebugger) {
+ genDebuggerUpdate(cUnit, mir->offset);
}
- return false;
+
+ /* Don't generate the SSA annotation unless verbose mode is on */
+ if (cUnit->printMe && mir->ssaRep) {
+ char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
+ newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
+ }
+
+ if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
+ handleExtendedMethodMIR(cUnit, bb, mir);
+ continue;
+ }
+
+ bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
+ if (notHandled) {
+ LOG(FATAL) << StringPrintf("%#06x: Opcode %#x (%s) / Fmt %d not handled",
+ mir->offset, dalvikOpcode,
+ Instruction::Name(dalvikOpcode), dalvikFormat);
+
+ }
+ }
+
+ if (headLIR) {
+ /*
+ * Eliminate redundant loads/stores and delay stores into later
+ * slots
+ */
+ oatApplyLocalOptimizations(cUnit, (LIR*) headLIR, cUnit->lastLIRInsn);
+
+ /*
+ * Generate an unconditional branch to the fallthrough block.
+ */
+ if (bb->fallThrough) {
+ opUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
+ }
+ }
+ return false;
}
/* Set basic block labels */
bool labelBlocks(CompilationUnit* cUnit, BasicBlock* bb)
{
- LIR* labelList = (LIR*) cUnit->blockLabelList;
- int blockId = bb->id;
+ LIR* labelList = (LIR*) cUnit->blockLabelList;
+ int blockId = bb->id;
- cUnit->curBlock = bb;
- labelList[blockId].operands[0] = bb->startOffset;
+ cUnit->curBlock = bb;
+ labelList[blockId].operands[0] = bb->startOffset;
- /* Insert the block label */
- labelList[blockId].opcode = kPseudoNormalBlockLabel;
- return false;
+ /* Insert the block label */
+ labelList[blockId].opcode = kPseudoNormalBlockLabel;
+ return false;
}
void oatSpecialMIR2LIR(CompilationUnit* cUnit, SpecialCaseHandler specialCase)
{
- /* Find the first DalvikByteCode block */
- int numReachableBlocks = cUnit->numReachableBlocks;
- const GrowableList *blockList = &cUnit->blockList;
- BasicBlock*bb = NULL;
- for (int idx = 0; idx < numReachableBlocks; idx++) {
- int dfsIndex = cUnit->dfsOrder.elemList[idx];
- bb = (BasicBlock*)oatGrowableListGetElement(blockList, dfsIndex);
- if (bb->blockType == kDalvikByteCode) {
- break;
- }
+ /* Find the first DalvikByteCode block */
+ int numReachableBlocks = cUnit->numReachableBlocks;
+ const GrowableList *blockList = &cUnit->blockList;
+ BasicBlock*bb = NULL;
+ for (int idx = 0; idx < numReachableBlocks; idx++) {
+ int dfsIndex = cUnit->dfsOrder.elemList[idx];
+ bb = (BasicBlock*)oatGrowableListGetElement(blockList, dfsIndex);
+ if (bb->blockType == kDalvikByteCode) {
+ break;
}
- if (bb == NULL) {
- return;
- }
- DCHECK_EQ(bb->startOffset, 0);
- DCHECK(bb->firstMIRInsn != 0);
+ }
+ if (bb == NULL) {
+ return;
+ }
+ DCHECK_EQ(bb->startOffset, 0);
+ DCHECK(bb->firstMIRInsn != 0);
- /* Get the first instruction */
- MIR* mir = bb->firstMIRInsn;
+ /* Get the first instruction */
+ MIR* mir = bb->firstMIRInsn;
- /* Free temp registers and reset redundant store tracking */
- oatResetRegPool(cUnit);
- oatResetDefTracking(cUnit);
- oatClobberAllRegs(cUnit);
+ /* Free temp registers and reset redundant store tracking */
+ oatResetRegPool(cUnit);
+ oatResetDefTracking(cUnit);
+ oatClobberAllRegs(cUnit);
- genSpecialCase(cUnit, bb, mir, specialCase);
+ genSpecialCase(cUnit, bb, mir, specialCase);
}
void oatMethodMIR2LIR(CompilationUnit* cUnit)
{
- /* Used to hold the labels of each block */
- cUnit->blockLabelList =
- (void *) oatNew(cUnit, sizeof(LIR) * cUnit->numBlocks, true,
- kAllocLIR);
+ /* Used to hold the labels of each block */
+ cUnit->blockLabelList =
+ (void *) oatNew(cUnit, sizeof(LIR) * cUnit->numBlocks, true, kAllocLIR);
- oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
- kPreOrderDFSTraversal, false /* Iterative */);
+ oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
+ kPreOrderDFSTraversal, false /* Iterative */);
- handleSuspendLaunchpads(cUnit);
+ handleSuspendLaunchpads(cUnit);
- handleThrowLaunchpads(cUnit);
+ handleThrowLaunchpads(cUnit);
- handleIntrinsicLaunchpads(cUnit);
+ handleIntrinsicLaunchpads(cUnit);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations))) {
- removeRedundantBranches(cUnit);
- }
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations))) {
+ removeRedundantBranches(cUnit);
+ }
}
/* Needed by the ld/st optmizatons */
LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
- return opRegCopyNoInsert(cUnit, rDest, rSrc);
+ return opRegCopyNoInsert(cUnit, rDest, rSrc);
}
/* Needed by the register allocator */
void oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
- opRegCopy(cUnit, rDest, rSrc);
+ opRegCopy(cUnit, rDest, rSrc);
}
/* Needed by the register allocator */
void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+ int srcLo, int srcHi)
{
- opRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
+ opRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
}
void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
- int displacement, int rSrc, OpSize size)
+ int displacement, int rSrc, OpSize size)
{
- storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
+ storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
}
void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
- int displacement, int rSrcLo, int rSrcHi)
+ int displacement, int rSrcLo, int rSrcHi)
{
- storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
+ storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
}
} // namespace art
diff --git a/src/compiler/codegen/Ralloc.h b/src/compiler/codegen/Ralloc.h
index 671dffe..c0b6068 100644
--- a/src/compiler/codegen/Ralloc.h
+++ b/src/compiler/codegen/Ralloc.h
@@ -29,9 +29,9 @@
/* Static register use counts */
struct RefCounts {
- int count;
- int sReg;
- bool doubleStart; // Starting vReg for a double
+ int count;
+ int sReg;
+ bool doubleStart; // Starting vReg for a double
};
@@ -46,36 +46,32 @@
*/
inline int oatSRegHi(int lowSreg) {
- return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
+ return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}
inline bool oatLiveOut(CompilationUnit* cUnit, int sReg)
{
- //For now.
- return true;
+ //For now.
+ return true;
}
inline int oatSSASrc(MIR* mir, int num)
{
- DCHECK_GT(mir->ssaRep->numUses, num);
- return mir->ssaRep->uses[num];
+ DCHECK_GT(mir->ssaRep->numUses, num);
+ return mir->ssaRep->uses[num];
}
extern RegLocation oatEvalLoc(CompilationUnit* cUnit, RegLocation loc,
- int regClass, bool update);
+ int regClass, bool update);
/* Mark a temp register as dead. Does not affect allocation state. */
extern void oatClobber(CompilationUnit* cUnit, int reg);
-
-extern RegLocation oatUpdateLoc(CompilationUnit* cUnit,
- RegLocation loc);
+extern RegLocation oatUpdateLoc(CompilationUnit* cUnit, RegLocation loc);
/* see comments for updateLoc */
-extern RegLocation oatUpdateLocWide(CompilationUnit* cUnit,
- RegLocation loc);
+extern RegLocation oatUpdateLocWide(CompilationUnit* cUnit, RegLocation loc);
-extern RegLocation oatUpdateRawLoc(CompilationUnit* cUnit,
- RegLocation loc);
+extern RegLocation oatUpdateRawLoc(CompilationUnit* cUnit, RegLocation loc);
extern void oatMarkLive(CompilationUnit* cUnit, int reg, int sReg);
@@ -85,8 +81,7 @@
extern void oatMarkDirty(CompilationUnit* cUnit, RegLocation loc);
-extern void oatMarkPair(CompilationUnit* cUnit, int lowReg,
- int highReg);
+extern void oatMarkPair(CompilationUnit* cUnit, int lowReg, int highReg);
extern void oatMarkClean(CompilationUnit* cUnit, RegLocation loc);
@@ -102,15 +97,15 @@
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
-extern void oatMarkDef(CompilationUnit* cUnit, RegLocation rl,
- LIR* start, LIR* finish);
+extern void oatMarkDef(CompilationUnit* cUnit, RegLocation rl, LIR* start,
+ LIR* finish);
/*
* Mark the beginning and end LIR of a def sequence. Note that
* on entry start points to the LIR prior to the beginning of the
* sequence.
*/
extern void oatMarkDefWide(CompilationUnit* cUnit, RegLocation rl,
- LIR* start, LIR* finish);
+ LIR* start, LIR* finish);
extern RegLocation oatGetSrcWide(CompilationUnit* cUnit, MIR* mir,
int low, int high);
@@ -183,8 +178,7 @@
*/
extern void oatLockTemp(CompilationUnit* cUnit, int reg);
-extern RegLocation oatWideToNarrow(CompilationUnit* cUnit,
- RegLocation rl);
+extern RegLocation oatWideToNarrow(CompilationUnit* cUnit, RegLocation rl);
/*
* Free all allocated temps in the temp pools. Note that this does
@@ -213,8 +207,7 @@
extern int oatAllocTypedTempPair(CompilationUnit* cUnit,
bool fpHint, int regClass);
-extern int oatAllocTypedTemp(CompilationUnit* cUnit, bool fpHint,
- int regClass);
+extern int oatAllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass);
extern void oatRegCopyWide(CompilationUnit* cUnit, int destLo,
int destHi, int srcLo, int srcHi);
diff --git a/src/compiler/codegen/RallocUtil.cc b/src/compiler/codegen/RallocUtil.cc
index 6a98d60..14b4159 100644
--- a/src/compiler/codegen/RallocUtil.cc
+++ b/src/compiler/codegen/RallocUtil.cc
@@ -47,15 +47,15 @@
*/
extern void oatResetRegPool(CompilationUnit* cUnit)
{
- int i;
- for (i=0; i < cUnit->regPool->numCoreRegs; i++) {
- if (cUnit->regPool->coreRegs[i].isTemp)
- cUnit->regPool->coreRegs[i].inUse = false;
- }
- for (i=0; i < cUnit->regPool->numFPRegs; i++) {
- if (cUnit->regPool->FPRegs[i].isTemp)
- cUnit->regPool->FPRegs[i].inUse = false;
- }
+ int i;
+ for (i=0; i < cUnit->regPool->numCoreRegs; i++) {
+ if (cUnit->regPool->coreRegs[i].isTemp)
+ cUnit->regPool->coreRegs[i].inUse = false;
+ }
+ for (i=0; i < cUnit->regPool->numFPRegs; i++) {
+ if (cUnit->regPool->FPRegs[i].isTemp)
+ cUnit->regPool->FPRegs[i].inUse = false;
+ }
}
/*
@@ -64,90 +64,88 @@
*/
extern void oatInitPool(RegisterInfo* regs, int* regNums, int num)
{
- int i;
- for (i=0; i < num; i++) {
- regs[i].reg = regNums[i];
- regs[i].inUse = false;
- regs[i].isTemp = false;
- regs[i].pair = false;
- regs[i].live = false;
- regs[i].dirty = false;
- regs[i].sReg = INVALID_SREG;
- }
+ int i;
+ for (i=0; i < num; i++) {
+ regs[i].reg = regNums[i];
+ regs[i].inUse = false;
+ regs[i].isTemp = false;
+ regs[i].pair = false;
+ regs[i].live = false;
+ regs[i].dirty = false;
+ regs[i].sReg = INVALID_SREG;
+ }
}
void dumpRegPool(RegisterInfo* p, int numRegs)
{
- LOG(INFO) << "================================================";
- for (int i = 0; i < numRegs; i++) {
- LOG(INFO) << StringPrintf(
- "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
- p[i].reg, p[i].isTemp, p[i].inUse, p[i].pair, p[i].partner,
- p[i].live, p[i].dirty, p[i].sReg,(int)p[i].defStart,
- (int)p[i].defEnd);
- }
- LOG(INFO) << "================================================";
+ LOG(INFO) << "================================================";
+ for (int i = 0; i < numRegs; i++) {
+ LOG(INFO) << StringPrintf(
+ "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+ p[i].reg, p[i].isTemp, p[i].inUse, p[i].pair, p[i].partner,
+ p[i].live, p[i].dirty, p[i].sReg,(int)p[i].defStart,
+ (int)p[i].defEnd);
+ }
+ LOG(INFO) << "================================================";
}
void oatDumpCoreRegPool(CompilationUnit* cUnit)
{
- dumpRegPool(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs);
+ dumpRegPool(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs);
}
void oatDumpFpRegPool(CompilationUnit* cUnit)
{
- dumpRegPool(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs);
+ dumpRegPool(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs);
}
/* Mark a temp register as dead. Does not affect allocation state. */
static inline void clobberBody(CompilationUnit *cUnit, RegisterInfo* p)
{
- if (p->isTemp) {
- DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
- p->live = false;
- p->sReg = INVALID_SREG;
- p->defStart = NULL;
- p->defEnd = NULL;
- if (p->pair) {
- p->pair = false;
- oatClobber(cUnit, p->partner);
- }
+ if (p->isTemp) {
+ DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
+ p->live = false;
+ p->sReg = INVALID_SREG;
+ p->defStart = NULL;
+ p->defEnd = NULL;
+ if (p->pair) {
+ p->pair = false;
+ oatClobber(cUnit, p->partner);
}
+ }
}
/* Mark a temp register as dead. Does not affect allocation state. */
void oatClobber(CompilationUnit* cUnit, int reg)
{
- clobberBody(cUnit, oatGetRegInfo(cUnit, reg));
+ clobberBody(cUnit, oatGetRegInfo(cUnit, reg));
}
void clobberSRegBody(RegisterInfo* p, int numRegs, int sReg)
{
- int i;
- for (i=0; i< numRegs; i++) {
- if (p[i].sReg == sReg) {
- if (p[i].isTemp) {
- p[i].live = false;
- }
- p[i].defStart = NULL;
- p[i].defEnd = NULL;
- }
+ int i;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].sReg == sReg) {
+ if (p[i].isTemp) {
+ p[i].live = false;
+ }
+ p[i].defStart = NULL;
+ p[i].defEnd = NULL;
}
+ }
}
/* Clobber any temp associated with an sReg. Could be in either class */
extern void oatClobberSReg(CompilationUnit* cUnit, int sReg)
{
#ifndef NDEBUG
- /* Reset live temp tracking sanity checker */
- if (sReg == cUnit->liveSReg) {
- cUnit->liveSReg = INVALID_SREG;
- }
+ /* Reset live temp tracking sanity checker */
+ if (sReg == cUnit->liveSReg) {
+ cUnit->liveSReg = INVALID_SREG;
+ }
#endif
- clobberSRegBody(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs,
- sReg);
- clobberSRegBody(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs,
- sReg);
+ clobberSRegBody(cUnit->regPool->coreRegs, cUnit->regPool->numCoreRegs, sReg);
+ clobberSRegBody(cUnit->regPool->FPRegs, cUnit->regPool->numFPRegs, sReg);
}
/*
@@ -161,39 +159,39 @@
*/
int SRegToPMap(CompilationUnit* cUnit, int sReg)
{
- DCHECK_LT(sReg, cUnit->numSSARegs);
- DCHECK_GE(sReg, 0);
- int vReg = SRegToVReg(cUnit, sReg);
- if (vReg >= 0) {
- DCHECK_LT(vReg, cUnit->numDalvikRegisters);
- return vReg;
- } else {
- int pos = std::abs(vReg) - std::abs(SSA_METHOD_BASEREG);
- DCHECK_LE(pos, cUnit->numCompilerTemps);
- return cUnit->numDalvikRegisters + pos;
- }
+ DCHECK_LT(sReg, cUnit->numSSARegs);
+ DCHECK_GE(sReg, 0);
+ int vReg = SRegToVReg(cUnit, sReg);
+ if (vReg >= 0) {
+ DCHECK_LT(vReg, cUnit->numDalvikRegisters);
+ return vReg;
+ } else {
+ int pos = std::abs(vReg) - std::abs(SSA_METHOD_BASEREG);
+ DCHECK_LE(pos, cUnit->numCompilerTemps);
+ return cUnit->numDalvikRegisters + pos;
+ }
}
/* Reserve a callee-save register. Return -1 if none available */
extern int oatAllocPreservedCoreReg(CompilationUnit* cUnit, int sReg)
{
- int res = -1;
- RegisterInfo* coreRegs = cUnit->regPool->coreRegs;
- for (int i = 0; i < cUnit->regPool->numCoreRegs; i++) {
- if (!coreRegs[i].isTemp && !coreRegs[i].inUse) {
- int vReg = SRegToVReg(cUnit, sReg);
- int pMapIdx = SRegToPMap(cUnit, sReg);
- res = coreRegs[i].reg;
- coreRegs[i].inUse = true;
- cUnit->coreSpillMask |= (1 << res);
- cUnit->coreVmapTable.push_back(vReg);
- cUnit->numCoreSpills++;
- cUnit->promotionMap[pMapIdx].coreLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].coreReg = res;
- break;
- }
+ int res = -1;
+ RegisterInfo* coreRegs = cUnit->regPool->coreRegs;
+ for (int i = 0; i < cUnit->regPool->numCoreRegs; i++) {
+ if (!coreRegs[i].isTemp && !coreRegs[i].inUse) {
+ int vReg = SRegToVReg(cUnit, sReg);
+ int pMapIdx = SRegToPMap(cUnit, sReg);
+ res = coreRegs[i].reg;
+ coreRegs[i].inUse = true;
+ cUnit->coreSpillMask |= (1 << res);
+ cUnit->coreVmapTable.push_back(vReg);
+ cUnit->numCoreSpills++;
+ cUnit->promotionMap[pMapIdx].coreLocation = kLocPhysReg;
+ cUnit->promotionMap[pMapIdx].coreReg = res;
+ break;
}
- return res;
+ }
+ return res;
}
/*
@@ -203,22 +201,22 @@
*/
int allocPreservedSingle(CompilationUnit* cUnit, int sReg, bool even)
{
- int res = -1;
- RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
- for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
- if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
- ((FPRegs[i].reg & 0x1) == 0) == even) {
- int vReg = SRegToVReg(cUnit, sReg);
- int pMapIdx = SRegToPMap(cUnit, sReg);
- res = FPRegs[i].reg;
- FPRegs[i].inUse = true;
- oatMarkPreservedSingle(cUnit, vReg, res);
- cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].fpReg = res;
- break;
- }
+ int res = -1;
+ RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
+ for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
+ if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
+ ((FPRegs[i].reg & 0x1) == 0) == even) {
+ int vReg = SRegToVReg(cUnit, sReg);
+ int pMapIdx = SRegToPMap(cUnit, sReg);
+ res = FPRegs[i].reg;
+ FPRegs[i].inUse = true;
+ oatMarkPreservedSingle(cUnit, vReg, res);
+ cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
+ cUnit->promotionMap[pMapIdx].fpReg = res;
+ break;
}
- return res;
+ }
+ return res;
}
/*
@@ -231,52 +229,52 @@
*/
int allocPreservedDouble(CompilationUnit* cUnit, int sReg)
{
- int res = -1; // Assume failure
- int vReg = SRegToVReg(cUnit, sReg);
- int pMapIdx = SRegToPMap(cUnit, sReg);
- if (cUnit->promotionMap[pMapIdx+1].fpLocation == kLocPhysReg) {
- // Upper reg is already allocated. Can we fit?
- int highReg = cUnit->promotionMap[pMapIdx+1].fpReg;
- if ((highReg & 1) == 0) {
- // High reg is even - fail.
- return res;
- }
- // Is the low reg of the pair free?
- RegisterInfo* p = oatGetRegInfo(cUnit, highReg-1);
- if (p->inUse || p->isTemp) {
- // Already allocated or not preserved - fail.
- return res;
- }
- // OK - good to go.
- res = p->reg;
- p->inUse = true;
- DCHECK_EQ((res & 1), 0);
+ int res = -1; // Assume failure
+ int vReg = SRegToVReg(cUnit, sReg);
+ int pMapIdx = SRegToPMap(cUnit, sReg);
+ if (cUnit->promotionMap[pMapIdx+1].fpLocation == kLocPhysReg) {
+ // Upper reg is already allocated. Can we fit?
+ int highReg = cUnit->promotionMap[pMapIdx+1].fpReg;
+ if ((highReg & 1) == 0) {
+ // High reg is even - fail.
+ return res;
+ }
+ // Is the low reg of the pair free?
+ RegisterInfo* p = oatGetRegInfo(cUnit, highReg-1);
+ if (p->inUse || p->isTemp) {
+ // Already allocated or not preserved - fail.
+ return res;
+ }
+ // OK - good to go.
+ res = p->reg;
+ p->inUse = true;
+ DCHECK_EQ((res & 1), 0);
+ oatMarkPreservedSingle(cUnit, vReg, res);
+ } else {
+ RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
+ for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
+ if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
+ ((FPRegs[i].reg & 0x1) == 0x0) &&
+ !FPRegs[i+1].isTemp && !FPRegs[i+1].inUse &&
+ ((FPRegs[i+1].reg & 0x1) == 0x1) &&
+ (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
+ res = FPRegs[i].reg;
+ FPRegs[i].inUse = true;
oatMarkPreservedSingle(cUnit, vReg, res);
- } else {
- RegisterInfo* FPRegs = cUnit->regPool->FPRegs;
- for (int i = 0; i < cUnit->regPool->numFPRegs; i++) {
- if (!FPRegs[i].isTemp && !FPRegs[i].inUse &&
- ((FPRegs[i].reg & 0x1) == 0x0) &&
- !FPRegs[i+1].isTemp && !FPRegs[i+1].inUse &&
- ((FPRegs[i+1].reg & 0x1) == 0x1) &&
- (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
- res = FPRegs[i].reg;
- FPRegs[i].inUse = true;
- oatMarkPreservedSingle(cUnit, vReg, res);
- FPRegs[i+1].inUse = true;
- DCHECK_EQ(res + 1, FPRegs[i+1].reg);
- oatMarkPreservedSingle(cUnit, vReg+1, res+1);
- break;
- }
- }
+ FPRegs[i+1].inUse = true;
+ DCHECK_EQ(res + 1, FPRegs[i+1].reg);
+ oatMarkPreservedSingle(cUnit, vReg+1, res+1);
+ break;
+ }
}
- if (res != -1) {
- cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx].fpReg = res;
- cUnit->promotionMap[pMapIdx+1].fpLocation = kLocPhysReg;
- cUnit->promotionMap[pMapIdx+1].fpReg = res + 1;
- }
- return res;
+ }
+ if (res != -1) {
+ cUnit->promotionMap[pMapIdx].fpLocation = kLocPhysReg;
+ cUnit->promotionMap[pMapIdx].fpReg = res;
+ cUnit->promotionMap[pMapIdx+1].fpLocation = kLocPhysReg;
+ cUnit->promotionMap[pMapIdx+1].fpReg = res + 1;
+ }
+ return res;
}
@@ -287,238 +285,238 @@
* first to allocate an odd register.
*/
extern int oatAllocPreservedFPReg(CompilationUnit* cUnit, int sReg,
- bool doubleStart)
+ bool doubleStart)
{
- int res = -1;
- if (doubleStart) {
- res = allocPreservedDouble(cUnit, sReg);
- }
- if (res == -1) {
- res = allocPreservedSingle(cUnit, sReg, false /* try odd # */);
- }
- if (res == -1)
- res = allocPreservedSingle(cUnit, sReg, true /* try even # */);
- return res;
+ int res = -1;
+ if (doubleStart) {
+ res = allocPreservedDouble(cUnit, sReg);
+ }
+ if (res == -1) {
+ res = allocPreservedSingle(cUnit, sReg, false /* try odd # */);
+ }
+ if (res == -1)
+ res = allocPreservedSingle(cUnit, sReg, true /* try even # */);
+ return res;
}
int allocTempBody(CompilationUnit* cUnit, RegisterInfo* p, int numRegs,
- int* nextTemp, bool required)
+ int* nextTemp, bool required)
{
- int i;
- int next = *nextTemp;
- for (i=0; i< numRegs; i++) {
- if (next >= numRegs)
- next = 0;
- if (p[next].isTemp && !p[next].inUse && !p[next].live) {
- oatClobber(cUnit, p[next].reg);
- p[next].inUse = true;
- p[next].pair = false;
- *nextTemp = next + 1;
- return p[next].reg;
- }
- next++;
+ int i;
+ int next = *nextTemp;
+ for (i=0; i< numRegs; i++) {
+ if (next >= numRegs)
+ next = 0;
+ if (p[next].isTemp && !p[next].inUse && !p[next].live) {
+ oatClobber(cUnit, p[next].reg);
+ p[next].inUse = true;
+ p[next].pair = false;
+ *nextTemp = next + 1;
+ return p[next].reg;
}
- next = *nextTemp;
- for (i=0; i< numRegs; i++) {
- if (next >= numRegs)
- next = 0;
- if (p[next].isTemp && !p[next].inUse) {
- oatClobber(cUnit, p[next].reg);
- p[next].inUse = true;
- p[next].pair = false;
- *nextTemp = next + 1;
- return p[next].reg;
- }
- next++;
+ next++;
+ }
+ next = *nextTemp;
+ for (i=0; i< numRegs; i++) {
+ if (next >= numRegs)
+ next = 0;
+ if (p[next].isTemp && !p[next].inUse) {
+ oatClobber(cUnit, p[next].reg);
+ p[next].inUse = true;
+ p[next].pair = false;
+ *nextTemp = next + 1;
+ return p[next].reg;
}
- if (required) {
- oatCodegenDump(cUnit);
- dumpRegPool(cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs);
- LOG(FATAL) << "No free temp registers";
- }
- return -1; // No register available
+ next++;
+ }
+ if (required) {
+ oatCodegenDump(cUnit);
+ dumpRegPool(cUnit->regPool->coreRegs,
+ cUnit->regPool->numCoreRegs);
+ LOG(FATAL) << "No free temp registers";
+ }
+ return -1; // No register available
}
//REDO: too many assumptions.
extern int oatAllocTempDouble(CompilationUnit* cUnit)
{
- RegisterInfo* p = cUnit->regPool->FPRegs;
- int numRegs = cUnit->regPool->numFPRegs;
- /* Start looking at an even reg */
- int next = cUnit->regPool->nextFPReg & ~0x1;
+ RegisterInfo* p = cUnit->regPool->FPRegs;
+ int numRegs = cUnit->regPool->numFPRegs;
+ /* Start looking at an even reg */
+ int next = cUnit->regPool->nextFPReg & ~0x1;
- // First try to avoid allocating live registers
- for (int i=0; i < numRegs; i+=2) {
- if (next >= numRegs)
- next = 0;
- if ((p[next].isTemp && !p[next].inUse && !p[next].live) &&
- (p[next+1].isTemp && !p[next+1].inUse && !p[next+1].live)) {
- oatClobber(cUnit, p[next].reg);
- oatClobber(cUnit, p[next+1].reg);
- p[next].inUse = true;
- p[next+1].inUse = true;
- DCHECK_EQ((p[next].reg+1), p[next+1].reg);
- DCHECK_EQ((p[next].reg & 0x1), 0);
- cUnit->regPool->nextFPReg = next + 2;
- if (cUnit->regPool->nextFPReg >= numRegs) {
- cUnit->regPool->nextFPReg = 0;
- }
- return p[next].reg;
- }
- next += 2;
+ // First try to avoid allocating live registers
+ for (int i=0; i < numRegs; i+=2) {
+ if (next >= numRegs)
+ next = 0;
+ if ((p[next].isTemp && !p[next].inUse && !p[next].live) &&
+ (p[next+1].isTemp && !p[next+1].inUse && !p[next+1].live)) {
+ oatClobber(cUnit, p[next].reg);
+ oatClobber(cUnit, p[next+1].reg);
+ p[next].inUse = true;
+ p[next+1].inUse = true;
+ DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+ DCHECK_EQ((p[next].reg & 0x1), 0);
+ cUnit->regPool->nextFPReg = next + 2;
+ if (cUnit->regPool->nextFPReg >= numRegs) {
+ cUnit->regPool->nextFPReg = 0;
+ }
+ return p[next].reg;
}
- next = cUnit->regPool->nextFPReg & ~0x1;
+ next += 2;
+ }
+ next = cUnit->regPool->nextFPReg & ~0x1;
- // No choice - find a pair and kill it.
- for (int i=0; i < numRegs; i+=2) {
- if (next >= numRegs)
- next = 0;
- if (p[next].isTemp && !p[next].inUse && p[next+1].isTemp &&
- !p[next+1].inUse) {
- oatClobber(cUnit, p[next].reg);
- oatClobber(cUnit, p[next+1].reg);
- p[next].inUse = true;
- p[next+1].inUse = true;
- DCHECK_EQ((p[next].reg+1), p[next+1].reg);
- DCHECK_EQ((p[next].reg & 0x1), 0);
- cUnit->regPool->nextFPReg = next + 2;
- if (cUnit->regPool->nextFPReg >= numRegs) {
- cUnit->regPool->nextFPReg = 0;
- }
- return p[next].reg;
- }
- next += 2;
+ // No choice - find a pair and kill it.
+ for (int i=0; i < numRegs; i+=2) {
+ if (next >= numRegs)
+ next = 0;
+ if (p[next].isTemp && !p[next].inUse && p[next+1].isTemp &&
+ !p[next+1].inUse) {
+ oatClobber(cUnit, p[next].reg);
+ oatClobber(cUnit, p[next+1].reg);
+ p[next].inUse = true;
+ p[next+1].inUse = true;
+ DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+ DCHECK_EQ((p[next].reg & 0x1), 0);
+ cUnit->regPool->nextFPReg = next + 2;
+ if (cUnit->regPool->nextFPReg >= numRegs) {
+ cUnit->regPool->nextFPReg = 0;
+ }
+ return p[next].reg;
}
- LOG(FATAL) << "No free temp registers (pair)";
- return -1;
+ next += 2;
+ }
+ LOG(FATAL) << "No free temp registers (pair)";
+ return -1;
}
/* Return a temp if one is available, -1 otherwise */
extern int oatAllocFreeTemp(CompilationUnit* cUnit)
{
- return allocTempBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs,
- &cUnit->regPool->nextCoreReg, true);
+ return allocTempBody(cUnit, cUnit->regPool->coreRegs,
+ cUnit->regPool->numCoreRegs,
+ &cUnit->regPool->nextCoreReg, true);
}
extern int oatAllocTemp(CompilationUnit* cUnit)
{
- return allocTempBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs,
- &cUnit->regPool->nextCoreReg, true);
+ return allocTempBody(cUnit, cUnit->regPool->coreRegs,
+ cUnit->regPool->numCoreRegs,
+ &cUnit->regPool->nextCoreReg, true);
}
extern int oatAllocTempFloat(CompilationUnit* cUnit)
{
- return allocTempBody(cUnit, cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs,
- &cUnit->regPool->nextFPReg, true);
+ return allocTempBody(cUnit, cUnit->regPool->FPRegs,
+ cUnit->regPool->numFPRegs,
+ &cUnit->regPool->nextFPReg, true);
}
RegisterInfo* allocLiveBody(RegisterInfo* p, int numRegs, int sReg)
{
- int i;
- if (sReg == -1)
- return NULL;
- for (i=0; i < numRegs; i++) {
- if (p[i].live && (p[i].sReg == sReg)) {
- if (p[i].isTemp)
- p[i].inUse = true;
- return &p[i];
- }
- }
+ int i;
+ if (sReg == -1)
return NULL;
+ for (i=0; i < numRegs; i++) {
+ if (p[i].live && (p[i].sReg == sReg)) {
+ if (p[i].isTemp)
+ p[i].inUse = true;
+ return &p[i];
+ }
+ }
+ return NULL;
}
RegisterInfo* allocLive(CompilationUnit* cUnit, int sReg, int regClass)
{
- RegisterInfo* res = NULL;
- switch (regClass) {
- case kAnyReg:
- res = allocLiveBody(cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs, sReg);
- if (res)
- break;
- /* Intentional fallthrough */
- case kCoreReg:
- res = allocLiveBody(cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs, sReg);
- break;
- case kFPReg:
- res = allocLiveBody(cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs, sReg);
- break;
- default:
- LOG(FATAL) << "Invalid register type";
- }
- return res;
+ RegisterInfo* res = NULL;
+ switch (regClass) {
+ case kAnyReg:
+ res = allocLiveBody(cUnit->regPool->FPRegs,
+ cUnit->regPool->numFPRegs, sReg);
+ if (res)
+ break;
+ /* Intentional fallthrough */
+ case kCoreReg:
+ res = allocLiveBody(cUnit->regPool->coreRegs,
+ cUnit->regPool->numCoreRegs, sReg);
+ break;
+ case kFPReg:
+ res = allocLiveBody(cUnit->regPool->FPRegs,
+ cUnit->regPool->numFPRegs, sReg);
+ break;
+ default:
+ LOG(FATAL) << "Invalid register type";
+ }
+ return res;
}
extern void oatFreeTemp(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
- int i;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- if (p[i].isTemp) {
- p[i].inUse = false;
- }
- p[i].pair = false;
- return;
- }
+ RegisterInfo* p = cUnit->regPool->coreRegs;
+ int numRegs = cUnit->regPool->numCoreRegs;
+ int i;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ if (p[i].isTemp) {
+ p[i].inUse = false;
+ }
+ p[i].pair = false;
+ return;
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- if (p[i].isTemp) {
- p[i].inUse = false;
- }
- p[i].pair = false;
- return;
- }
+ }
+ p = cUnit->regPool->FPRegs;
+ numRegs = cUnit->regPool->numFPRegs;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ if (p[i].isTemp) {
+ p[i].inUse = false;
+ }
+ p[i].pair = false;
+ return;
}
- LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+ }
+ LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
}
extern RegisterInfo* oatIsLive(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
- int i;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
+ RegisterInfo* p = cUnit->regPool->coreRegs;
+ int numRegs = cUnit->regPool->numCoreRegs;
+ int i;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ return p[i].live ? &p[i] : NULL;
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
+ }
+ p = cUnit->regPool->FPRegs;
+ numRegs = cUnit->regPool->numFPRegs;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ return p[i].live ? &p[i] : NULL;
}
- return NULL;
+ }
+ return NULL;
}
extern RegisterInfo* oatIsTemp(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = oatGetRegInfo(cUnit, reg);
- return (p->isTemp) ? p : NULL;
+ RegisterInfo* p = oatGetRegInfo(cUnit, reg);
+ return (p->isTemp) ? p : NULL;
}
extern RegisterInfo* oatIsPromoted(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = oatGetRegInfo(cUnit, reg);
- return (p->isTemp) ? NULL : p;
+ RegisterInfo* p = oatGetRegInfo(cUnit, reg);
+ return (p->isTemp) ? NULL : p;
}
extern bool oatIsDirty(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = oatGetRegInfo(cUnit, reg);
- return p->dirty;
+ RegisterInfo* p = oatGetRegInfo(cUnit, reg);
+ return p->dirty;
}
/*
@@ -528,53 +526,53 @@
*/
extern void oatLockTemp(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* p = cUnit->regPool->coreRegs;
- int numRegs = cUnit->regPool->numCoreRegs;
- int i;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].isTemp);
- p[i].inUse = true;
- p[i].live = false;
- return;
- }
+ RegisterInfo* p = cUnit->regPool->coreRegs;
+ int numRegs = cUnit->regPool->numCoreRegs;
+ int i;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ DCHECK(p[i].isTemp);
+ p[i].inUse = true;
+ p[i].live = false;
+ return;
}
- p = cUnit->regPool->FPRegs;
- numRegs = cUnit->regPool->numFPRegs;
- for (i=0; i< numRegs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].isTemp);
- p[i].inUse = true;
- p[i].live = false;
- return;
- }
+ }
+ p = cUnit->regPool->FPRegs;
+ numRegs = cUnit->regPool->numFPRegs;
+ for (i=0; i< numRegs; i++) {
+ if (p[i].reg == reg) {
+ DCHECK(p[i].isTemp);
+ p[i].inUse = true;
+ p[i].live = false;
+ return;
}
- LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+ }
+ LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
}
static inline void resetDefBody(RegisterInfo* p)
{
- p->defStart = NULL;
- p->defEnd = NULL;
+ p->defStart = NULL;
+ p->defEnd = NULL;
}
extern void oatResetDef(CompilationUnit* cUnit, int reg)
{
- resetDefBody(oatGetRegInfo(cUnit, reg));
+ resetDefBody(oatGetRegInfo(cUnit, reg));
}
void nullifyRange(CompilationUnit* cUnit, LIR *start, LIR *finish,
- int sReg1, int sReg2)
+ int sReg1, int sReg2)
{
- if (start && finish) {
- LIR *p;
- DCHECK_EQ(sReg1, sReg2);
- for (p = start; ;p = p->next) {
- oatNopLIR(p);
- if (p == finish)
- break;
- }
+ if (start && finish) {
+ LIR *p;
+ DCHECK_EQ(sReg1, sReg2);
+ for (p = start; ;p = p->next) {
+ oatNopLIR(p);
+ if (p == finish)
+ break;
}
+ }
}
/*
@@ -583,14 +581,14 @@
* sequence.
*/
extern void oatMarkDef(CompilationUnit* cUnit, RegLocation rl,
- LIR *start, LIR *finish)
+ LIR *start, LIR *finish)
{
- DCHECK(!rl.wide);
- DCHECK(start && start->next);
- DCHECK(finish);
- RegisterInfo* p = oatGetRegInfo(cUnit, rl.lowReg);
- p->defStart = start->next;
- p->defEnd = finish;
+ DCHECK(!rl.wide);
+ DCHECK(start && start->next);
+ DCHECK(finish);
+ RegisterInfo* p = oatGetRegInfo(cUnit, rl.lowReg);
+ p->defStart = start->next;
+ p->defEnd = finish;
}
/*
@@ -599,206 +597,204 @@
* sequence.
*/
extern void oatMarkDefWide(CompilationUnit* cUnit, RegLocation rl,
- LIR *start, LIR *finish)
+ LIR *start, LIR *finish)
{
- DCHECK(rl.wide);
- DCHECK(start && start->next);
- DCHECK(finish);
- RegisterInfo* p = oatGetRegInfo(cUnit, rl.lowReg);
- oatResetDef(cUnit, rl.highReg); // Only track low of pair
- p->defStart = start->next;
- p->defEnd = finish;
+ DCHECK(rl.wide);
+ DCHECK(start && start->next);
+ DCHECK(finish);
+ RegisterInfo* p = oatGetRegInfo(cUnit, rl.lowReg);
+ oatResetDef(cUnit, rl.highReg); // Only track low of pair
+ p->defStart = start->next;
+ p->defEnd = finish;
}
extern RegLocation oatWideToNarrow(CompilationUnit* cUnit, RegLocation rl)
{
- DCHECK(rl.wide);
- if (rl.location == kLocPhysReg) {
- RegisterInfo* infoLo = oatGetRegInfo(cUnit, rl.lowReg);
- RegisterInfo* infoHi = oatGetRegInfo(cUnit, rl.highReg);
- if (infoLo->isTemp) {
- infoLo->pair = false;
- infoLo->defStart = NULL;
- infoLo->defEnd = NULL;
- }
- if (infoHi->isTemp) {
- infoHi->pair = false;
- infoHi->defStart = NULL;
- infoHi->defEnd = NULL;
- }
+ DCHECK(rl.wide);
+ if (rl.location == kLocPhysReg) {
+ RegisterInfo* infoLo = oatGetRegInfo(cUnit, rl.lowReg);
+ RegisterInfo* infoHi = oatGetRegInfo(cUnit, rl.highReg);
+ if (infoLo->isTemp) {
+ infoLo->pair = false;
+ infoLo->defStart = NULL;
+ infoLo->defEnd = NULL;
}
- rl.wide = false;
- return rl;
+ if (infoHi->isTemp) {
+ infoHi->pair = false;
+ infoHi->defStart = NULL;
+ infoHi->defEnd = NULL;
+ }
+ }
+ rl.wide = false;
+ return rl;
}
extern void oatResetDefLoc(CompilationUnit* cUnit, RegLocation rl)
{
- DCHECK(!rl.wide);
- RegisterInfo* p = oatIsTemp(cUnit, rl.lowReg);
- if (p && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
- DCHECK(!p->pair);
- nullifyRange(cUnit, p->defStart, p->defEnd,
- p->sReg, rl.sRegLow);
- }
- oatResetDef(cUnit, rl.lowReg);
+ DCHECK(!rl.wide);
+ RegisterInfo* p = oatIsTemp(cUnit, rl.lowReg);
+ if (p && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
+ DCHECK(!p->pair);
+ nullifyRange(cUnit, p->defStart, p->defEnd, p->sReg, rl.sRegLow);
+ }
+ oatResetDef(cUnit, rl.lowReg);
}
extern void oatResetDefLocWide(CompilationUnit* cUnit, RegLocation rl)
{
- DCHECK(rl.wide);
- RegisterInfo* pLow = oatIsTemp(cUnit, rl.lowReg);
- RegisterInfo* pHigh = oatIsTemp(cUnit, rl.highReg);
- if (pLow && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
- DCHECK(pLow->pair);
- nullifyRange(cUnit, pLow->defStart, pLow->defEnd,
- pLow->sReg, rl.sRegLow);
- }
- if (pHigh && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
- DCHECK(pHigh->pair);
- }
- oatResetDef(cUnit, rl.lowReg);
- oatResetDef(cUnit, rl.highReg);
+ DCHECK(rl.wide);
+ RegisterInfo* pLow = oatIsTemp(cUnit, rl.lowReg);
+ RegisterInfo* pHigh = oatIsTemp(cUnit, rl.highReg);
+ if (pLow && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
+ DCHECK(pLow->pair);
+ nullifyRange(cUnit, pLow->defStart, pLow->defEnd, pLow->sReg, rl.sRegLow);
+ }
+ if (pHigh && !(cUnit->disableOpt & (1 << kSuppressLoads))) {
+ DCHECK(pHigh->pair);
+ }
+ oatResetDef(cUnit, rl.lowReg);
+ oatResetDef(cUnit, rl.highReg);
}
extern void oatResetDefTracking(CompilationUnit* cUnit)
{
- int i;
- for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
- resetDefBody(&cUnit->regPool->coreRegs[i]);
- }
- for (i=0; i< cUnit->regPool->numFPRegs; i++) {
- resetDefBody(&cUnit->regPool->FPRegs[i]);
- }
+ int i;
+ for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
+ resetDefBody(&cUnit->regPool->coreRegs[i]);
+ }
+ for (i=0; i< cUnit->regPool->numFPRegs; i++) {
+ resetDefBody(&cUnit->regPool->FPRegs[i]);
+ }
}
extern void oatClobberAllRegs(CompilationUnit* cUnit)
{
- int i;
- for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
- clobberBody(cUnit, &cUnit->regPool->coreRegs[i]);
- }
- for (i=0; i< cUnit->regPool->numFPRegs; i++) {
- clobberBody(cUnit, &cUnit->regPool->FPRegs[i]);
- }
+ int i;
+ for (i=0; i< cUnit->regPool->numCoreRegs; i++) {
+ clobberBody(cUnit, &cUnit->regPool->coreRegs[i]);
+ }
+ for (i=0; i< cUnit->regPool->numFPRegs; i++) {
+ clobberBody(cUnit, &cUnit->regPool->FPRegs[i]);
+ }
}
// Make sure nothing is live and dirty
void flushAllRegsBody(CompilationUnit* cUnit, RegisterInfo* info,
- int numRegs)
+ int numRegs)
{
- int i;
- for (i=0; i < numRegs; i++) {
- if (info[i].live && info[i].dirty) {
- if (info[i].pair) {
- oatFlushRegWide(cUnit, info[i].reg, info[i].partner);
- } else {
- oatFlushReg(cUnit, info[i].reg);
- }
- }
+ int i;
+ for (i=0; i < numRegs; i++) {
+ if (info[i].live && info[i].dirty) {
+ if (info[i].pair) {
+ oatFlushRegWide(cUnit, info[i].reg, info[i].partner);
+ } else {
+ oatFlushReg(cUnit, info[i].reg);
+ }
}
+ }
}
extern void oatFlushAllRegs(CompilationUnit* cUnit)
{
- flushAllRegsBody(cUnit, cUnit->regPool->coreRegs,
- cUnit->regPool->numCoreRegs);
- flushAllRegsBody(cUnit, cUnit->regPool->FPRegs,
- cUnit->regPool->numFPRegs);
- oatClobberAllRegs(cUnit);
+ flushAllRegsBody(cUnit, cUnit->regPool->coreRegs,
+ cUnit->regPool->numCoreRegs);
+ flushAllRegsBody(cUnit, cUnit->regPool->FPRegs,
+ cUnit->regPool->numFPRegs);
+ oatClobberAllRegs(cUnit);
}
//TUNING: rewrite all of this reg stuff. Probably use an attribute table
bool regClassMatches(int regClass, int reg)
{
- if (regClass == kAnyReg) {
- return true;
- } else if (regClass == kCoreReg) {
- return !oatIsFpReg(reg);
- } else {
- return oatIsFpReg(reg);
- }
+ if (regClass == kAnyReg) {
+ return true;
+ } else if (regClass == kCoreReg) {
+ return !oatIsFpReg(reg);
+ } else {
+ return oatIsFpReg(reg);
+ }
}
extern void oatMarkLive(CompilationUnit* cUnit, int reg, int sReg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if ((info->reg == reg) && (info->sReg == sReg) && info->live) {
- return; /* already live */
- } else if (sReg != INVALID_SREG) {
- oatClobberSReg(cUnit, sReg);
- if (info->isTemp) {
- info->live = true;
- }
- } else {
- /* Can't be live if no associated sReg */
- DCHECK(info->isTemp);
- info->live = false;
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if ((info->reg == reg) && (info->sReg == sReg) && info->live) {
+ return; /* already live */
+ } else if (sReg != INVALID_SREG) {
+ oatClobberSReg(cUnit, sReg);
+ if (info->isTemp) {
+ info->live = true;
}
- info->sReg = sReg;
+ } else {
+ /* Can't be live if no associated sReg */
+ DCHECK(info->isTemp);
+ info->live = false;
+ }
+ info->sReg = sReg;
}
extern void oatMarkTemp(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- info->isTemp = true;
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ info->isTemp = true;
}
extern void oatUnmarkTemp(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- info->isTemp = false;
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ info->isTemp = false;
}
extern void oatMarkPair(CompilationUnit* cUnit, int lowReg, int highReg)
{
- RegisterInfo* infoLo = oatGetRegInfo(cUnit, lowReg);
- RegisterInfo* infoHi = oatGetRegInfo(cUnit, highReg);
- infoLo->pair = infoHi->pair = true;
- infoLo->partner = highReg;
- infoHi->partner = lowReg;
+ RegisterInfo* infoLo = oatGetRegInfo(cUnit, lowReg);
+ RegisterInfo* infoHi = oatGetRegInfo(cUnit, highReg);
+ infoLo->pair = infoHi->pair = true;
+ infoLo->partner = highReg;
+ infoHi->partner = lowReg;
}
extern void oatMarkClean(CompilationUnit* cUnit, RegLocation loc)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, loc.lowReg);
+ RegisterInfo* info = oatGetRegInfo(cUnit, loc.lowReg);
+ info->dirty = false;
+ if (loc.wide) {
+ info = oatGetRegInfo(cUnit, loc.highReg);
info->dirty = false;
- if (loc.wide) {
- info = oatGetRegInfo(cUnit, loc.highReg);
- info->dirty = false;
- }
+ }
}
extern void oatMarkDirty(CompilationUnit* cUnit, RegLocation loc)
{
- if (loc.home) {
- // If already home, can't be dirty
- return;
- }
- RegisterInfo* info = oatGetRegInfo(cUnit, loc.lowReg);
+ if (loc.home) {
+ // If already home, can't be dirty
+ return;
+ }
+ RegisterInfo* info = oatGetRegInfo(cUnit, loc.lowReg);
+ info->dirty = true;
+ if (loc.wide) {
+ info = oatGetRegInfo(cUnit, loc.highReg);
info->dirty = true;
- if (loc.wide) {
- info = oatGetRegInfo(cUnit, loc.highReg);
- info->dirty = true;
- }
+ }
}
extern void oatMarkInUse(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- info->inUse = true;
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ info->inUse = true;
}
void copyRegInfo(CompilationUnit* cUnit, int newReg, int oldReg)
{
- RegisterInfo* newInfo = oatGetRegInfo(cUnit, newReg);
- RegisterInfo* oldInfo = oatGetRegInfo(cUnit, oldReg);
- // Target temp status must not change
- bool isTemp = newInfo->isTemp;
- *newInfo = *oldInfo;
- // Restore target's temp status
- newInfo->isTemp = isTemp;
- newInfo->reg = newReg;
+ RegisterInfo* newInfo = oatGetRegInfo(cUnit, newReg);
+ RegisterInfo* oldInfo = oatGetRegInfo(cUnit, oldReg);
+ // Target temp status must not change
+ bool isTemp = newInfo->isTemp;
+ *newInfo = *oldInfo;
+ // Restore target's temp status
+ newInfo->isTemp = isTemp;
+ newInfo->reg = newReg;
}
/*
@@ -813,50 +809,50 @@
extern RegLocation oatUpdateLoc(CompilationUnit* cUnit, RegLocation loc)
{
- DCHECK(!loc.wide);
- DCHECK(oatCheckCorePoolSanity(cUnit));
- if (loc.location != kLocPhysReg) {
- DCHECK((loc.location == kLocDalvikFrame) ||
- (loc.location == kLocCompilerTemp));
- RegisterInfo* infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
- if (infoLo) {
- if (infoLo->pair) {
- oatClobber(cUnit, infoLo->reg);
- oatClobber(cUnit, infoLo->partner);
- oatFreeTemp(cUnit, infoLo->reg);
- } else {
- loc.lowReg = infoLo->reg;
- loc.location = kLocPhysReg;
- }
- }
+ DCHECK(!loc.wide);
+ DCHECK(oatCheckCorePoolSanity(cUnit));
+ if (loc.location != kLocPhysReg) {
+ DCHECK((loc.location == kLocDalvikFrame) ||
+ (loc.location == kLocCompilerTemp));
+ RegisterInfo* infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
+ if (infoLo) {
+ if (infoLo->pair) {
+ oatClobber(cUnit, infoLo->reg);
+ oatClobber(cUnit, infoLo->partner);
+ oatFreeTemp(cUnit, infoLo->reg);
+ } else {
+ loc.lowReg = infoLo->reg;
+ loc.location = kLocPhysReg;
+ }
}
+ }
- return loc;
+ return loc;
}
bool oatCheckCorePoolSanity(CompilationUnit* cUnit)
{
for (static int i = 0; i < cUnit->regPool->numCoreRegs; i++) {
- if (cUnit->regPool->coreRegs[i].pair) {
- static int myReg = cUnit->regPool->coreRegs[i].reg;
- static int mySreg = cUnit->regPool->coreRegs[i].sReg;
- static int partnerReg = cUnit->regPool->coreRegs[i].partner;
- static RegisterInfo* partner = oatGetRegInfo(cUnit, partnerReg);
- DCHECK(partner != NULL);
- DCHECK(partner->pair);
- DCHECK_EQ(myReg, partner->partner);
- static int partnerSreg = partner->sReg;
- if (mySreg == INVALID_SREG) {
- DCHECK_EQ(partnerSreg, INVALID_SREG);
- } else {
- int diff = mySreg - partnerSreg;
- DCHECK((diff == -1) || (diff == 1));
- }
+ if (cUnit->regPool->coreRegs[i].pair) {
+ static int myReg = cUnit->regPool->coreRegs[i].reg;
+ static int mySreg = cUnit->regPool->coreRegs[i].sReg;
+ static int partnerReg = cUnit->regPool->coreRegs[i].partner;
+ static RegisterInfo* partner = oatGetRegInfo(cUnit, partnerReg);
+ DCHECK(partner != NULL);
+ DCHECK(partner->pair);
+ DCHECK_EQ(myReg, partner->partner);
+ static int partnerSreg = partner->sReg;
+ if (mySreg == INVALID_SREG) {
+ DCHECK_EQ(partnerSreg, INVALID_SREG);
+ } else {
+ int diff = mySreg - partnerSreg;
+ DCHECK((diff == -1) || (diff == 1));
}
- if (!cUnit->regPool->coreRegs[i].live) {
- DCHECK(cUnit->regPool->coreRegs[i].defStart == NULL);
- DCHECK(cUnit->regPool->coreRegs[i].defEnd == NULL);
- }
+ }
+ if (!cUnit->regPool->coreRegs[i].live) {
+ DCHECK(cUnit->regPool->coreRegs[i].defStart == NULL);
+ DCHECK(cUnit->regPool->coreRegs[i].defEnd == NULL);
+ }
}
return true;
}
@@ -864,228 +860,228 @@
/* see comments for updateLoc */
extern RegLocation oatUpdateLocWide(CompilationUnit* cUnit, RegLocation loc)
{
- DCHECK(loc.wide);
- DCHECK(oatCheckCorePoolSanity(cUnit));
- if (loc.location != kLocPhysReg) {
- DCHECK((loc.location == kLocDalvikFrame) ||
- (loc.location == kLocCompilerTemp));
- // Are the dalvik regs already live in physical registers?
- RegisterInfo* infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
- RegisterInfo* infoHi = allocLive(cUnit,
- oatSRegHi(loc.sRegLow), kAnyReg);
- bool match = true;
- match = match && (infoLo != NULL);
- match = match && (infoHi != NULL);
- // Are they both core or both FP?
- match = match && (oatIsFpReg(infoLo->reg) == oatIsFpReg(infoHi->reg));
- // If a pair of floating point singles, are they properly aligned?
- if (match && oatIsFpReg(infoLo->reg)) {
- match &= ((infoLo->reg & 0x1) == 0);
- match &= ((infoHi->reg - infoLo->reg) == 1);
- }
- // If previously used as a pair, it is the same pair?
- if (match && (infoLo->pair || infoHi->pair)) {
- match = (infoLo->pair == infoHi->pair);
- match &= ((infoLo->reg == infoHi->partner) &&
- (infoHi->reg == infoLo->partner));
- }
- if (match) {
- // Can reuse - update the register usage info
- loc.lowReg = infoLo->reg;
- loc.highReg = infoHi->reg;
- loc.location = kLocPhysReg;
- oatMarkPair(cUnit, loc.lowReg, loc.highReg);
- DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
- return loc;
- }
- // Can't easily reuse - clobber and free any overlaps
- if (infoLo) {
- oatClobber(cUnit, infoLo->reg);
- oatFreeTemp(cUnit, infoLo->reg);
- if (infoLo->pair)
- oatClobber(cUnit, infoLo->partner);
- }
- if (infoHi) {
- oatClobber(cUnit, infoHi->reg);
- oatFreeTemp(cUnit, infoHi->reg);
- if (infoHi->pair)
- oatClobber(cUnit, infoHi->partner);
- }
+ DCHECK(loc.wide);
+ DCHECK(oatCheckCorePoolSanity(cUnit));
+ if (loc.location != kLocPhysReg) {
+ DCHECK((loc.location == kLocDalvikFrame) ||
+ (loc.location == kLocCompilerTemp));
+ // Are the dalvik regs already live in physical registers?
+ RegisterInfo* infoLo = allocLive(cUnit, loc.sRegLow, kAnyReg);
+ RegisterInfo* infoHi = allocLive(cUnit,
+ oatSRegHi(loc.sRegLow), kAnyReg);
+ bool match = true;
+ match = match && (infoLo != NULL);
+ match = match && (infoHi != NULL);
+ // Are they both core or both FP?
+ match = match && (oatIsFpReg(infoLo->reg) == oatIsFpReg(infoHi->reg));
+ // If a pair of floating point singles, are they properly aligned?
+ if (match && oatIsFpReg(infoLo->reg)) {
+ match &= ((infoLo->reg & 0x1) == 0);
+ match &= ((infoHi->reg - infoLo->reg) == 1);
}
- return loc;
+ // If previously used as a pair, it is the same pair?
+ if (match && (infoLo->pair || infoHi->pair)) {
+ match = (infoLo->pair == infoHi->pair);
+ match &= ((infoLo->reg == infoHi->partner) &&
+ (infoHi->reg == infoLo->partner));
+ }
+ if (match) {
+ // Can reuse - update the register usage info
+ loc.lowReg = infoLo->reg;
+ loc.highReg = infoHi->reg;
+ loc.location = kLocPhysReg;
+ oatMarkPair(cUnit, loc.lowReg, loc.highReg);
+ DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ return loc;
+ }
+ // Can't easily reuse - clobber and free any overlaps
+ if (infoLo) {
+ oatClobber(cUnit, infoLo->reg);
+ oatFreeTemp(cUnit, infoLo->reg);
+ if (infoLo->pair)
+ oatClobber(cUnit, infoLo->partner);
+ }
+ if (infoHi) {
+ oatClobber(cUnit, infoHi->reg);
+ oatFreeTemp(cUnit, infoHi->reg);
+ if (infoHi->pair)
+ oatClobber(cUnit, infoHi->partner);
+ }
+ }
+ return loc;
}
/* For use in cases we don't know (or care) width */
extern RegLocation oatUpdateRawLoc(CompilationUnit* cUnit, RegLocation loc)
{
- if (loc.wide)
- return oatUpdateLocWide(cUnit, loc);
- else
- return oatUpdateLoc(cUnit, loc);
+ if (loc.wide)
+ return oatUpdateLocWide(cUnit, loc);
+ else
+ return oatUpdateLoc(cUnit, loc);
}
RegLocation evalLocWide(CompilationUnit* cUnit, RegLocation loc,
- int regClass, bool update)
+ int regClass, bool update)
{
- DCHECK(loc.wide);
- int newRegs;
- int lowReg;
- int highReg;
+ DCHECK(loc.wide);
+ int newRegs;
+ int lowReg;
+ int highReg;
- loc = oatUpdateLocWide(cUnit, loc);
+ loc = oatUpdateLocWide(cUnit, loc);
- /* If already in registers, we can assume proper form. Right reg class? */
- if (loc.location == kLocPhysReg) {
- DCHECK_EQ(oatIsFpReg(loc.lowReg), oatIsFpReg(loc.highReg));
- DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
- if (!regClassMatches(regClass, loc.lowReg)) {
- /* Wrong register class. Reallocate and copy */
- newRegs = oatAllocTypedTempPair(cUnit, loc.fp, regClass);
- lowReg = newRegs & 0xff;
- highReg = (newRegs >> 8) & 0xff;
- oatRegCopyWide(cUnit, lowReg, highReg, loc.lowReg,
- loc.highReg);
- copyRegInfo(cUnit, lowReg, loc.lowReg);
- copyRegInfo(cUnit, highReg, loc.highReg);
- oatClobber(cUnit, loc.lowReg);
- oatClobber(cUnit, loc.highReg);
- loc.lowReg = lowReg;
- loc.highReg = highReg;
- oatMarkPair(cUnit, loc.lowReg, loc.highReg);
- DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
- }
- return loc;
- }
-
- DCHECK_NE(loc.sRegLow, INVALID_SREG);
- DCHECK_NE(oatSRegHi(loc.sRegLow), INVALID_SREG);
-
- newRegs = oatAllocTypedTempPair(cUnit, loc.fp, regClass);
- loc.lowReg = newRegs & 0xff;
- loc.highReg = (newRegs >> 8) & 0xff;
-
- oatMarkPair(cUnit, loc.lowReg, loc.highReg);
- if (update) {
- loc.location = kLocPhysReg;
- oatMarkLive(cUnit, loc.lowReg, loc.sRegLow);
- oatMarkLive(cUnit, loc.highReg, oatSRegHi(loc.sRegLow));
- }
+ /* If already in registers, we can assume proper form. Right reg class? */
+ if (loc.location == kLocPhysReg) {
+ DCHECK_EQ(oatIsFpReg(loc.lowReg), oatIsFpReg(loc.highReg));
DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ if (!regClassMatches(regClass, loc.lowReg)) {
+ /* Wrong register class. Reallocate and copy */
+ newRegs = oatAllocTypedTempPair(cUnit, loc.fp, regClass);
+ lowReg = newRegs & 0xff;
+ highReg = (newRegs >> 8) & 0xff;
+ oatRegCopyWide(cUnit, lowReg, highReg, loc.lowReg,
+ loc.highReg);
+ copyRegInfo(cUnit, lowReg, loc.lowReg);
+ copyRegInfo(cUnit, highReg, loc.highReg);
+ oatClobber(cUnit, loc.lowReg);
+ oatClobber(cUnit, loc.highReg);
+ loc.lowReg = lowReg;
+ loc.highReg = highReg;
+ oatMarkPair(cUnit, loc.lowReg, loc.highReg);
+ DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ }
return loc;
+ }
+
+ DCHECK_NE(loc.sRegLow, INVALID_SREG);
+ DCHECK_NE(oatSRegHi(loc.sRegLow), INVALID_SREG);
+
+ newRegs = oatAllocTypedTempPair(cUnit, loc.fp, regClass);
+ loc.lowReg = newRegs & 0xff;
+ loc.highReg = (newRegs >> 8) & 0xff;
+
+ oatMarkPair(cUnit, loc.lowReg, loc.highReg);
+ if (update) {
+ loc.location = kLocPhysReg;
+ oatMarkLive(cUnit, loc.lowReg, loc.sRegLow);
+ oatMarkLive(cUnit, loc.highReg, oatSRegHi(loc.sRegLow));
+ }
+ DCHECK(!oatIsFpReg(loc.lowReg) || ((loc.lowReg & 0x1) == 0));
+ return loc;
}
extern RegLocation oatEvalLoc(CompilationUnit* cUnit, RegLocation loc,
- int regClass, bool update)
+ int regClass, bool update)
{
- int newReg;
+ int newReg;
- if (loc.wide)
- return evalLocWide(cUnit, loc, regClass, update);
+ if (loc.wide)
+ return evalLocWide(cUnit, loc, regClass, update);
- loc = oatUpdateLoc(cUnit, loc);
+ loc = oatUpdateLoc(cUnit, loc);
- if (loc.location == kLocPhysReg) {
- if (!regClassMatches(regClass, loc.lowReg)) {
- /* Wrong register class. Realloc, copy and transfer ownership */
- newReg = oatAllocTypedTemp(cUnit, loc.fp, regClass);
- oatRegCopy(cUnit, newReg, loc.lowReg);
- copyRegInfo(cUnit, newReg, loc.lowReg);
- oatClobber(cUnit, loc.lowReg);
- loc.lowReg = newReg;
- }
- return loc;
- }
-
- DCHECK_NE(loc.sRegLow, INVALID_SREG);
-
- newReg = oatAllocTypedTemp(cUnit, loc.fp, regClass);
- loc.lowReg = newReg;
-
- if (update) {
- loc.location = kLocPhysReg;
- oatMarkLive(cUnit, loc.lowReg, loc.sRegLow);
+ if (loc.location == kLocPhysReg) {
+ if (!regClassMatches(regClass, loc.lowReg)) {
+ /* Wrong register class. Realloc, copy and transfer ownership */
+ newReg = oatAllocTypedTemp(cUnit, loc.fp, regClass);
+ oatRegCopy(cUnit, newReg, loc.lowReg);
+ copyRegInfo(cUnit, newReg, loc.lowReg);
+ oatClobber(cUnit, loc.lowReg);
+ loc.lowReg = newReg;
}
return loc;
+ }
+
+ DCHECK_NE(loc.sRegLow, INVALID_SREG);
+
+ newReg = oatAllocTypedTemp(cUnit, loc.fp, regClass);
+ loc.lowReg = newReg;
+
+ if (update) {
+ loc.location = kLocPhysReg;
+ oatMarkLive(cUnit, loc.lowReg, loc.sRegLow);
+ }
+ return loc;
}
extern RegLocation oatGetDest(CompilationUnit* cUnit, MIR* mir, int num)
{
- RegLocation res = cUnit->regLocation[mir->ssaRep->defs[num]];
- DCHECK(!res.wide);
- return res;
+ RegLocation res = cUnit->regLocation[mir->ssaRep->defs[num]];
+ DCHECK(!res.wide);
+ return res;
}
extern RegLocation oatGetSrc(CompilationUnit* cUnit, MIR* mir, int num)
{
- RegLocation res = cUnit->regLocation[mir->ssaRep->uses[num]];
- DCHECK(!res.wide);
- return res;
+ RegLocation res = cUnit->regLocation[mir->ssaRep->uses[num]];
+ DCHECK(!res.wide);
+ return res;
}
extern RegLocation oatGetRawSrc(CompilationUnit* cUnit, MIR* mir, int num)
{
- RegLocation res = cUnit->regLocation[mir->ssaRep->uses[num]];
- return res;
+ RegLocation res = cUnit->regLocation[mir->ssaRep->uses[num]];
+ return res;
}
extern RegLocation oatGetDestWide(CompilationUnit* cUnit, MIR* mir,
- int low, int high)
+ int low, int high)
{
- RegLocation res = cUnit->regLocation[mir->ssaRep->defs[low]];
- DCHECK(res.wide);
- return res;
+ RegLocation res = cUnit->regLocation[mir->ssaRep->defs[low]];
+ DCHECK(res.wide);
+ return res;
}
extern RegLocation oatGetSrcWide(CompilationUnit* cUnit, MIR* mir,
- int low, int high)
+ int low, int high)
{
- RegLocation res = cUnit->regLocation[mir->ssaRep->uses[low]];
- DCHECK(res.wide);
- return res;
+ RegLocation res = cUnit->regLocation[mir->ssaRep->uses[low]];
+ DCHECK(res.wide);
+ return res;
}
/* USE SSA names to count references of base Dalvik vRegs. */
void oatCountRefs(CompilationUnit *cUnit, BasicBlock* bb,
- RefCounts* coreCounts, RefCounts* fpCounts)
+ RefCounts* coreCounts, RefCounts* fpCounts)
{
- if ((cUnit->disableOpt & (1 << kPromoteRegs)) ||
- !((bb->blockType == kEntryBlock) || (bb->blockType == kExitBlock) ||
- (bb->blockType == kDalvikByteCode))) {
- return;
+ if ((cUnit->disableOpt & (1 << kPromoteRegs)) ||
+ !((bb->blockType == kEntryBlock) || (bb->blockType == kExitBlock) ||
+ (bb->blockType == kDalvikByteCode))) {
+ return;
+ }
+ for (int i = 0; i < cUnit->numSSARegs;) {
+ RegLocation loc = cUnit->regLocation[i];
+ RefCounts* counts = loc.fp ? fpCounts : coreCounts;
+ int pMapIdx = SRegToPMap(cUnit, loc.sRegLow);
+ if (loc.defined) {
+ counts[pMapIdx].count += cUnit->useCounts.elemList[i];
}
- for (int i = 0; i < cUnit->numSSARegs;) {
- RegLocation loc = cUnit->regLocation[i];
- RefCounts* counts = loc.fp ? fpCounts : coreCounts;
- int pMapIdx = SRegToPMap(cUnit, loc.sRegLow);
- if (loc.defined) {
- counts[pMapIdx].count += cUnit->useCounts.elemList[i];
+ if (loc.wide) {
+ if (loc.defined) {
+ if (loc.fp) {
+ counts[pMapIdx].doubleStart = true;
+ counts[pMapIdx+1].count += cUnit->useCounts.elemList[i+1];
}
- if (loc.wide) {
- if (loc.defined) {
- if (loc.fp) {
- counts[pMapIdx].doubleStart = true;
- counts[pMapIdx+1].count += cUnit->useCounts.elemList[i+1];
- }
- }
- i += 2;
- } else {
- i++;
- }
+ }
+ i += 2;
+ } else {
+ i++;
}
+ }
}
/* qsort callback function, sort descending */
int oatSortCounts(const void *val1, const void *val2)
{
- const RefCounts* op1 = (const RefCounts*)val1;
- const RefCounts* op2 = (const RefCounts*)val2;
- return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1);
+ const RefCounts* op1 = (const RefCounts*)val1;
+ const RefCounts* op2 = (const RefCounts*)val2;
+ return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1);
}
void oatDumpCounts(const RefCounts* arr, int size, const char* msg)
{
- LOG(INFO) << msg;
- for (int i = 0; i < size; i++) {
- LOG(INFO) << "sReg[" << arr[i].sReg << "]: " << arr[i].count;
- }
+ LOG(INFO) << msg;
+ for (int i = 0; i < size; i++) {
+ LOG(INFO) << "sReg[" << arr[i].sReg << "]: " << arr[i].count;
+ }
}
/*
@@ -1094,171 +1090,171 @@
*/
extern void oatDoPromotion(CompilationUnit* cUnit)
{
- int regBias = cUnit->numCompilerTemps + 1;
- int dalvikRegs = cUnit->numDalvikRegisters;
- int numRegs = dalvikRegs + regBias;
- const int promotionThreshold = 2;
+ int regBias = cUnit->numCompilerTemps + 1;
+ int dalvikRegs = cUnit->numDalvikRegisters;
+ int numRegs = dalvikRegs + regBias;
+ const int promotionThreshold = 2;
- // Allow target code to add any special registers
- oatAdjustSpillMask(cUnit);
+ // Allow target code to add any special registers
+ oatAdjustSpillMask(cUnit);
- /*
- * Simple register promotion. Just do a static count of the uses
- * of Dalvik registers. Note that we examine the SSA names, but
- * count based on original Dalvik register name. Count refs
- * separately based on type in order to give allocation
- * preference to fp doubles - which must be allocated sequential
- * physical single fp registers started with an even-numbered
- * reg.
- * TUNING: replace with linear scan once we have the ability
- * to describe register live ranges for GC.
- */
- RefCounts *coreRegs = (RefCounts *)
- oatNew(cUnit, sizeof(RefCounts) * numRegs, true, kAllocRegAlloc);
- RefCounts *fpRegs = (RefCounts *)
- oatNew(cUnit, sizeof(RefCounts) * numRegs, true, kAllocRegAlloc);
- // Set ssa names for original Dalvik registers
- for (int i = 0; i < dalvikRegs; i++) {
- coreRegs[i].sReg = fpRegs[i].sReg = i;
+ /*
+ * Simple register promotion. Just do a static count of the uses
+ * of Dalvik registers. Note that we examine the SSA names, but
+ * count based on original Dalvik register name. Count refs
+ * separately based on type in order to give allocation
+ * preference to fp doubles - which must be allocated sequential
+ * physical single fp registers started with an even-numbered
+ * reg.
+ * TUNING: replace with linear scan once we have the ability
+ * to describe register live ranges for GC.
+ */
+ RefCounts *coreRegs = (RefCounts *)
+ oatNew(cUnit, sizeof(RefCounts) * numRegs, true, kAllocRegAlloc);
+ RefCounts *fpRegs = (RefCounts *)
+ oatNew(cUnit, sizeof(RefCounts) * numRegs, true, kAllocRegAlloc);
+ // Set ssa names for original Dalvik registers
+ for (int i = 0; i < dalvikRegs; i++) {
+ coreRegs[i].sReg = fpRegs[i].sReg = i;
+ }
+ // Set ssa name for Method*
+ coreRegs[dalvikRegs].sReg = cUnit->methodSReg;
+  fpRegs[dalvikRegs].sReg = cUnit->methodSReg;  // For consistency
+ // Set ssa names for compilerTemps
+ for (int i = 1; i <= cUnit->numCompilerTemps; i++) {
+ CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
+ coreRegs[dalvikRegs + i].sReg = ct->sReg;
+ fpRegs[dalvikRegs + i].sReg = ct->sReg;
+ }
+
+ GrowableListIterator iterator;
+ oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
+ while (true) {
+ BasicBlock* bb;
+ bb = (BasicBlock*)oatGrowableListIteratorNext(&iterator);
+ if (bb == NULL) break;
+ oatCountRefs(cUnit, bb, coreRegs, fpRegs);
+ }
+
+ /*
+ * Ideally, we'd allocate doubles starting with an even-numbered
+ * register. Bias the counts to try to allocate any vreg that's
+ * used as the start of a pair first.
+ */
+ for (int i = 0; i < numRegs; i++) {
+ if (fpRegs[i].doubleStart) {
+ fpRegs[i].count *= 2;
}
- // Set ssa name for Method*
- coreRegs[dalvikRegs].sReg = cUnit->methodSReg;
- fpRegs[dalvikRegs].sReg = cUnit->methodSReg; // For consistecy
- // Set ssa names for compilerTemps
- for (int i = 1; i <= cUnit->numCompilerTemps; i++) {
- CompilerTemp* ct = (CompilerTemp*)cUnit->compilerTemps.elemList[i];
- coreRegs[dalvikRegs + i].sReg = ct->sReg;
- fpRegs[dalvikRegs + i].sReg = ct->sReg;
+ }
+
+ // Sort the count arrays
+ qsort(coreRegs, numRegs, sizeof(RefCounts), oatSortCounts);
+ qsort(fpRegs, numRegs, sizeof(RefCounts), oatSortCounts);
+
+ if (cUnit->printMe) {
+ oatDumpCounts(coreRegs, numRegs, "Core regs after sort");
+ oatDumpCounts(fpRegs, numRegs, "Fp regs after sort");
+ }
+
+ if (!(cUnit->disableOpt & (1 << kPromoteRegs))) {
+ // Promote fpRegs
+ for (int i = 0; (i < numRegs) &&
+ (fpRegs[i].count >= promotionThreshold ); i++) {
+ int pMapIdx = SRegToPMap(cUnit, fpRegs[i].sReg);
+ if (cUnit->promotionMap[pMapIdx].fpLocation != kLocPhysReg) {
+ int reg = oatAllocPreservedFPReg(cUnit, fpRegs[i].sReg,
+ fpRegs[i].doubleStart);
+ if (reg < 0) {
+ break; // No more left
+ }
+ }
}
- GrowableListIterator iterator;
- oatGrowableListIteratorInit(&cUnit->blockList, &iterator);
- while (true) {
- BasicBlock* bb;
- bb = (BasicBlock*)oatGrowableListIteratorNext(&iterator);
- if (bb == NULL) break;
- oatCountRefs(cUnit, bb, coreRegs, fpRegs);
+ // Promote core regs
+ for (int i = 0; (i < numRegs) &&
+ (coreRegs[i].count > promotionThreshold); i++) {
+ int pMapIdx = SRegToPMap(cUnit, coreRegs[i].sReg);
+ if (cUnit->promotionMap[pMapIdx].coreLocation !=
+ kLocPhysReg) {
+ int reg = oatAllocPreservedCoreReg(cUnit, coreRegs[i].sReg);
+ if (reg < 0) {
+ break; // No more left
+ }
+ }
}
-
- /*
- * Ideally, we'd allocate doubles starting with an even-numbered
- * register. Bias the counts to try to allocate any vreg that's
- * used as the start of a pair first.
- */
+ } else if (cUnit->qdMode) {
+ oatAllocPreservedCoreReg(cUnit, cUnit->methodSReg);
for (int i = 0; i < numRegs; i++) {
- if (fpRegs[i].doubleStart) {
- fpRegs[i].count *= 2;
- }
+ int reg = oatAllocPreservedCoreReg(cUnit, i);
+ if (reg < 0) {
+ break; // No more left
+ }
}
+ }
- // Sort the count arrays
- qsort(coreRegs, numRegs, sizeof(RefCounts), oatSortCounts);
- qsort(fpRegs, numRegs, sizeof(RefCounts), oatSortCounts);
- if (cUnit->printMe) {
- oatDumpCounts(coreRegs, numRegs, "Core regs after sort");
- oatDumpCounts(fpRegs, numRegs, "Fp regs after sort");
+ // Now, update SSA names to new home locations
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ RegLocation *curr = &cUnit->regLocation[i];
+ int pMapIdx = SRegToPMap(cUnit, curr->sRegLow);
+ if (!curr->wide) {
+ if (curr->fp) {
+ if (cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) {
+ curr->location = kLocPhysReg;
+ curr->lowReg = cUnit->promotionMap[pMapIdx].fpReg;
+ curr->home = true;
+ }
+ } else {
+ if (cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg) {
+ curr->location = kLocPhysReg;
+ curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
+ curr->home = true;
+ }
+ }
+ curr->highReg = INVALID_REG;
+ } else {
+ if (curr->highWord) {
+ continue;
+ }
+ if (curr->fp) {
+ if ((cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) &&
+ (cUnit->promotionMap[pMapIdx+1].fpLocation ==
+ kLocPhysReg)) {
+ int lowReg = cUnit->promotionMap[pMapIdx].fpReg;
+ int highReg = cUnit->promotionMap[pMapIdx+1].fpReg;
+ // Doubles require pair of singles starting at even reg
+ if (((lowReg & 0x1) == 0) && ((lowReg + 1) == highReg)) {
+ curr->location = kLocPhysReg;
+ curr->lowReg = lowReg;
+ curr->highReg = highReg;
+ curr->home = true;
+ }
+ }
+ } else {
+ if ((cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg)
+ && (cUnit->promotionMap[pMapIdx+1].coreLocation ==
+ kLocPhysReg)) {
+ curr->location = kLocPhysReg;
+ curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
+ curr->highReg = cUnit->promotionMap[pMapIdx+1].coreReg;
+ curr->home = true;
+ }
+ }
}
-
- if (!(cUnit->disableOpt & (1 << kPromoteRegs))) {
- // Promote fpRegs
- for (int i = 0; (i < numRegs) &&
- (fpRegs[i].count >= promotionThreshold ); i++) {
- int pMapIdx = SRegToPMap(cUnit, fpRegs[i].sReg);
- if (cUnit->promotionMap[pMapIdx].fpLocation != kLocPhysReg) {
- int reg = oatAllocPreservedFPReg(cUnit, fpRegs[i].sReg,
- fpRegs[i].doubleStart);
- if (reg < 0) {
- break; // No more left
- }
- }
- }
-
- // Promote core regs
- for (int i = 0; (i < numRegs) &&
- (coreRegs[i].count > promotionThreshold); i++) {
- int pMapIdx = SRegToPMap(cUnit, coreRegs[i].sReg);
- if (cUnit->promotionMap[pMapIdx].coreLocation !=
- kLocPhysReg) {
- int reg = oatAllocPreservedCoreReg(cUnit, coreRegs[i].sReg);
- if (reg < 0) {
- break; // No more left
- }
- }
- }
- } else if (cUnit->qdMode) {
- oatAllocPreservedCoreReg(cUnit, cUnit->methodSReg);
- for (int i = 0; i < numRegs; i++) {
- int reg = oatAllocPreservedCoreReg(cUnit, i);
- if (reg < 0) {
- break; // No more left
- }
- }
- }
-
-
- // Now, update SSA names to new home locations
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- RegLocation *curr = &cUnit->regLocation[i];
- int pMapIdx = SRegToPMap(cUnit, curr->sRegLow);
- if (!curr->wide) {
- if (curr->fp) {
- if (cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) {
- curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].fpReg;
- curr->home = true;
- }
- } else {
- if (cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg) {
- curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
- curr->home = true;
- }
- }
- curr->highReg = INVALID_REG;
- } else {
- if (curr->highWord) {
- continue;
- }
- if (curr->fp) {
- if ((cUnit->promotionMap[pMapIdx].fpLocation == kLocPhysReg) &&
- (cUnit->promotionMap[pMapIdx+1].fpLocation ==
- kLocPhysReg)) {
- int lowReg = cUnit->promotionMap[pMapIdx].fpReg;
- int highReg = cUnit->promotionMap[pMapIdx+1].fpReg;
- // Doubles require pair of singles starting at even reg
- if (((lowReg & 0x1) == 0) && ((lowReg + 1) == highReg)) {
- curr->location = kLocPhysReg;
- curr->lowReg = lowReg;
- curr->highReg = highReg;
- curr->home = true;
- }
- }
- } else {
- if ((cUnit->promotionMap[pMapIdx].coreLocation == kLocPhysReg)
- && (cUnit->promotionMap[pMapIdx+1].coreLocation ==
- kLocPhysReg)) {
- curr->location = kLocPhysReg;
- curr->lowReg = cUnit->promotionMap[pMapIdx].coreReg;
- curr->highReg = cUnit->promotionMap[pMapIdx+1].coreReg;
- curr->home = true;
- }
- }
- }
- }
+ }
}
/* Returns sp-relative offset in bytes for a VReg */
extern int oatVRegOffset(CompilationUnit* cUnit, int vReg)
{
- return Frame::GetVRegOffset(cUnit->code_item, cUnit->coreSpillMask,
- cUnit->fpSpillMask, cUnit->frameSize, vReg);
+ return Frame::GetVRegOffset(cUnit->code_item, cUnit->coreSpillMask,
+ cUnit->fpSpillMask, cUnit->frameSize, vReg);
}
/* Returns sp-relative offset in bytes for a SReg */
extern int oatSRegOffset(CompilationUnit* cUnit, int sReg)
{
- return oatVRegOffset(cUnit, SRegToVReg(cUnit, sReg));
+ return oatVRegOffset(cUnit, SRegToVReg(cUnit, sReg));
}
} // namespace art
diff --git a/src/compiler/codegen/arm/ArchFactory.cc b/src/compiler/codegen/arm/ArchFactory.cc
index 9326faf..c20151e 100644
--- a/src/compiler/codegen/arm/ArchFactory.cc
+++ b/src/compiler/codegen/arm/ArchFactory.cc
@@ -31,129 +31,121 @@
bool genNegLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- int zReg = oatAllocTemp(cUnit);
- loadConstantNoClobber(cUnit, zReg, 0);
- // Check for destructive overlap
- if (rlResult.lowReg == rlSrc.highReg) {
- int tReg = oatAllocTemp(cUnit);
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
- zReg, rlSrc.lowReg);
- opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
- zReg, tReg);
- oatFreeTemp(cUnit, tReg);
- } else {
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
- zReg, rlSrc.lowReg);
- opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
- zReg, rlSrc.highReg);
- }
- oatFreeTemp(cUnit, zReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ int zReg = oatAllocTemp(cUnit);
+ loadConstantNoClobber(cUnit, zReg, 0);
+ // Check for destructive overlap
+ if (rlResult.lowReg == rlSrc.highReg) {
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
+ opRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ } else {
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
+ opRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, rlSrc.highReg);
+ }
+ oatFreeTemp(cUnit, zReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
int loadHelper(CompilationUnit* cUnit, int offset)
{
- loadWordDisp(cUnit, rSELF, offset, rLR);
- return rLR;
+ loadWordDisp(cUnit, rSELF, offset, rLR);
+ return rLR;
}
void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ /*
+ * On entry, r0, r1, r2 & r3 are live. Let the register allocation
+ * mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with a single temp: r12. This should be enough.
+ */
+ oatLockTemp(cUnit, r0);
+ oatLockTemp(cUnit, r1);
+ oatLockTemp(cUnit, r2);
+ oatLockTemp(cUnit, r3);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize <
+ Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ if (!skipOverflowCheck) {
+ /* Load stack limit */
+ loadWordDisp(cUnit, rSELF, Thread::StackEndOffset().Int32Value(), r12);
+ }
+ /* Spill core callee saves */
+ newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
+ /* Need to spill any FP regs? */
+ if (cUnit->numFPSpills) {
/*
- * On entry, r0, r1, r2 & r3 are live. Let the register allocation
- * mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with a single temp: r12. This should be enough.
+ * NOTE: fp spills are a little different from core spills in that
+ * they are pushed as a contiguous block. When promoting from
+ * the fp set, we must allocate all singles from s16..highest-promoted
*/
- oatLockTemp(cUnit, r0);
- oatLockTemp(cUnit, r1);
- oatLockTemp(cUnit, r2);
- oatLockTemp(cUnit, r3);
+ newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
+ }
+ if (!skipOverflowCheck) {
+ opRegRegImm(cUnit, kOpSub, rLR, rSP, cUnit->frameSize - (spillCount * 4));
+ genRegRegCheck(cUnit, kCondCc, rLR, r12, NULL, kThrowStackOverflow);
+ opRegCopy(cUnit, rSP, rLR); // Establish stack
+ } else {
+ opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (spillCount * 4));
+ }
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- ((size_t)cUnit->frameSize <
- Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kPseudoMethodEntry);
- if (!skipOverflowCheck) {
- /* Load stack limit */
- loadWordDisp(cUnit, rSELF,
- Thread::StackEndOffset().Int32Value(), r12);
- }
- /* Spill core callee saves */
- newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
- /* Need to spill any FP regs? */
- if (cUnit->numFPSpills) {
- /*
- * NOTE: fp spills are a little different from core spills in that
- * they are pushed as a contiguous block. When promoting from
- * the fp set, we must allocate all singles from s16..highest-promoted
- */
- newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
- }
- if (!skipOverflowCheck) {
- opRegRegImm(cUnit, kOpSub, rLR, rSP,
- cUnit->frameSize - (spillCount * 4));
- genRegRegCheck(cUnit, kCondCc, rLR, r12, NULL,
- kThrowStackOverflow);
- opRegCopy(cUnit, rSP, rLR); // Establish stack
- } else {
- opRegImm(cUnit, kOpSub, rSP,
- cUnit->frameSize - (spillCount * 4));
- }
+ flushIns(cUnit);
- flushIns(cUnit);
+ if (cUnit->genDebugger) {
+ // Refresh update debugger callout
+ loadWordDisp(cUnit, rSELF,
+ ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode), rSUSPEND);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
+ }
- if (cUnit->genDebugger) {
- // Refresh update debugger callout
- loadWordDisp(cUnit, rSELF,
- ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode), rSUSPEND);
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
- }
-
- oatFreeTemp(cUnit, r0);
- oatFreeTemp(cUnit, r1);
- oatFreeTemp(cUnit, r2);
- oatFreeTemp(cUnit, r3);
+ oatFreeTemp(cUnit, r0);
+ oatFreeTemp(cUnit, r1);
+ oatFreeTemp(cUnit, r2);
+ oatFreeTemp(cUnit, r3);
}
void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
- /*
- * In the exit path, r0/r1 are live - make sure they aren't
- * allocated by the register utilities as temps.
- */
- oatLockTemp(cUnit, r0);
- oatLockTemp(cUnit, r1);
+ int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ /*
+ * In the exit path, r0/r1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ oatLockTemp(cUnit, r0);
+ oatLockTemp(cUnit, r1);
- newLIR0(cUnit, kPseudoMethodExit);
- /* If we're compiling for the debugger, generate an update callout */
- if (cUnit->genDebugger) {
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
- }
- opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (spillCount * 4));
- /* Need to restore any FP callee saves? */
- if (cUnit->numFPSpills) {
- newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
- }
- if (cUnit->coreSpillMask & (1 << rLR)) {
- /* Unspill rLR to rPC */
- cUnit->coreSpillMask &= ~(1 << rLR);
- cUnit->coreSpillMask |= (1 << rPC);
- }
- newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
- if (!(cUnit->coreSpillMask & (1 << rPC))) {
- /* We didn't pop to rPC, so must do a bv rLR */
- newLIR1(cUnit, kThumbBx, rLR);
- }
+ newLIR0(cUnit, kPseudoMethodExit);
+ /* If we're compiling for the debugger, generate an update callout */
+ if (cUnit->genDebugger) {
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
+ }
+ opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (spillCount * 4));
+ /* Need to restore any FP callee saves? */
+ if (cUnit->numFPSpills) {
+ newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
+ }
+ if (cUnit->coreSpillMask & (1 << rLR)) {
+ /* Unspill rLR to rPC */
+ cUnit->coreSpillMask &= ~(1 << rLR);
+ cUnit->coreSpillMask |= (1 << rPC);
+ }
+ newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
+ if (!(cUnit->coreSpillMask & (1 << rPC))) {
+ /* We didn't pop to rPC, so must do a bv rLR */
+ newLIR1(cUnit, kThumbBx, rLR);
+ }
}
/*
@@ -163,55 +155,55 @@
*/
void removeRedundantBranches(CompilationUnit* cUnit)
{
- LIR* thisLIR;
+ LIR* thisLIR;
- for (thisLIR = (LIR*) cUnit->firstLIRInsn;
- thisLIR != (LIR*) cUnit->lastLIRInsn;
- thisLIR = NEXT_LIR(thisLIR)) {
+ for (thisLIR = (LIR*) cUnit->firstLIRInsn;
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
- /* Branch to the next instruction */
- if ((thisLIR->opcode == kThumbBUncond) ||
- (thisLIR->opcode == kThumb2BUncond)) {
- LIR* nextLIR = thisLIR;
+ /* Branch to the next instruction */
+ if ((thisLIR->opcode == kThumbBUncond) ||
+ (thisLIR->opcode == kThumb2BUncond)) {
+ LIR* nextLIR = thisLIR;
- while (true) {
- nextLIR = NEXT_LIR(nextLIR);
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
- /*
- * Is the branch target the next instruction?
- */
- if (nextLIR == (LIR*) thisLIR->target) {
- thisLIR->flags.isNop = true;
- break;
- }
-
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
- * might be the last real instruction.
- */
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (LIR*) cUnit->lastLIRInsn))
- break;
- }
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
}
+
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+          (nextLIR == (LIR*) cUnit->lastLIRInsn))
+ break;
+ }
}
+ }
}
/* Common initialization routine for an architecture family */
bool oatArchInit()
{
- int i;
+ int i;
- for (i = 0; i < kArmLast; i++) {
- if (EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
- " is wrong: expecting " << i << ", seeing " <<
- (int)EncodingMap[i].opcode;
- }
+ for (i = 0; i < kArmLast; i++) {
+ if (EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << (int)EncodingMap[i].opcode;
}
+ }
- return oatArchVariantInit();
+ return oatArchVariantInit();
}
} // namespace art
diff --git a/src/compiler/codegen/arm/ArchUtility.cc b/src/compiler/codegen/arm/ArchUtility.cc
index 4346bda..725200a 100644
--- a/src/compiler/codegen/arm/ArchUtility.cc
+++ b/src/compiler/codegen/arm/ArchUtility.cc
@@ -24,109 +24,109 @@
ArmConditionCode oatArmConditionEncoding(ConditionCode code)
{
- ArmConditionCode res;
- switch (code) {
- case kCondEq: res = kArmCondEq; break;
- case kCondNe: res = kArmCondNe; break;
- case kCondCs: res = kArmCondCs; break;
- case kCondCc: res = kArmCondCc; break;
- case kCondMi: res = kArmCondMi; break;
- case kCondPl: res = kArmCondPl; break;
- case kCondVs: res = kArmCondVs; break;
- case kCondVc: res = kArmCondVc; break;
- case kCondHi: res = kArmCondHi; break;
- case kCondLs: res = kArmCondLs; break;
- case kCondGe: res = kArmCondGe; break;
- case kCondLt: res = kArmCondLt; break;
- case kCondGt: res = kArmCondGt; break;
- case kCondLe: res = kArmCondLe; break;
- case kCondAl: res = kArmCondAl; break;
- case kCondNv: res = kArmCondNv; break;
- default:
- LOG(FATAL) << "Bad condition code" << (int)code;
- res = (ArmConditionCode)0; // Quiet gcc
- }
- return res;
+ ArmConditionCode res;
+ switch (code) {
+ case kCondEq: res = kArmCondEq; break;
+ case kCondNe: res = kArmCondNe; break;
+ case kCondCs: res = kArmCondCs; break;
+ case kCondCc: res = kArmCondCc; break;
+ case kCondMi: res = kArmCondMi; break;
+ case kCondPl: res = kArmCondPl; break;
+ case kCondVs: res = kArmCondVs; break;
+ case kCondVc: res = kArmCondVc; break;
+ case kCondHi: res = kArmCondHi; break;
+ case kCondLs: res = kArmCondLs; break;
+ case kCondGe: res = kArmCondGe; break;
+ case kCondLt: res = kArmCondLt; break;
+ case kCondGt: res = kArmCondGt; break;
+ case kCondLe: res = kArmCondLe; break;
+ case kCondAl: res = kArmCondAl; break;
+ case kCondNv: res = kArmCondNv; break;
+ default:
+ LOG(FATAL) << "Bad condition code" << (int)code;
+ res = (ArmConditionCode)0; // Quiet gcc
+ }
+ return res;
}
static const char* coreRegNames[16] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- "r8",
- "rSELF",
- "r10",
- "r11",
- "r12",
- "sp",
- "lr",
- "pc",
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "rSELF",
+ "r10",
+ "r11",
+ "r12",
+ "sp",
+ "lr",
+ "pc",
};
static const char* shiftNames[4] = {
- "lsl",
- "lsr",
- "asr",
- "ror"};
+ "lsl",
+ "lsr",
+ "asr",
+ "ror"};
/* Decode and print a ARM register name */
char* decodeRegList(int opcode, int vector, char* buf)
{
- int i;
- bool printed = false;
- buf[0] = 0;
- for (i = 0; i < 16; i++, vector >>= 1) {
- if (vector & 0x1) {
- int regId = i;
- if (opcode == kThumbPush && i == 8) {
- regId = r14lr;
- } else if (opcode == kThumbPop && i == 8) {
- regId = r15pc;
- }
- if (printed) {
- sprintf(buf + strlen(buf), ", r%d", regId);
- } else {
- printed = true;
- sprintf(buf, "r%d", regId);
- }
- }
+ int i;
+ bool printed = false;
+ buf[0] = 0;
+ for (i = 0; i < 16; i++, vector >>= 1) {
+ if (vector & 0x1) {
+ int regId = i;
+ if (opcode == kThumbPush && i == 8) {
+ regId = r14lr;
+ } else if (opcode == kThumbPop && i == 8) {
+ regId = r15pc;
+ }
+ if (printed) {
+ sprintf(buf + strlen(buf), ", r%d", regId);
+ } else {
+ printed = true;
+ sprintf(buf, "r%d", regId);
+ }
}
- return buf;
+ }
+ return buf;
}
char* decodeFPCSRegList(int count, int base, char* buf)
{
- sprintf(buf, "s%d", base);
- for (int i = 1; i < count; i++) {
- sprintf(buf + strlen(buf), ", s%d",base + i);
- }
- return buf;
+ sprintf(buf, "s%d", base);
+ for (int i = 1; i < count; i++) {
+ sprintf(buf + strlen(buf), ", s%d",base + i);
+ }
+ return buf;
}
int expandImmediate(int value)
{
- int mode = (value & 0xf00) >> 8;
- u4 bits = value & 0xff;
- switch (mode) {
- case 0:
- return bits;
- case 1:
- return (bits << 16) | bits;
- case 2:
- return (bits << 24) | (bits << 8);
- case 3:
- return (bits << 24) | (bits << 16) | (bits << 8) | bits;
- default:
- break;
- }
- bits = (bits | 0x80) << 24;
- return bits >> (((value & 0xf80) >> 7) - 8);
+ int mode = (value & 0xf00) >> 8;
+ u4 bits = value & 0xff;
+ switch (mode) {
+ case 0:
+ return bits;
+ case 1:
+ return (bits << 16) | bits;
+ case 2:
+ return (bits << 24) | (bits << 8);
+ case 3:
+ return (bits << 24) | (bits << 16) | (bits << 8) | bits;
+ default:
+ break;
+ }
+ bits = (bits | 0x80) << 24;
+ return bits >> (((value & 0xf80) >> 7) - 8);
}
const char* ccNames[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
@@ -135,190 +135,188 @@
* Interpret a format string and build a string no longer than size
* See format key in Assemble.c.
*/
-std::string buildInsnString(const char* fmt, LIR* lir,
- unsigned char* baseAddr)
+std::string buildInsnString(const char* fmt, LIR* lir, unsigned char* baseAddr)
{
- std::string buf;
- int i;
- const char* fmtEnd = &fmt[strlen(fmt)];
- char tbuf[256];
- const char* name;
- char nc;
- while (fmt < fmtEnd) {
- int operand;
- if (*fmt == '!') {
- fmt++;
- DCHECK_LT(fmt, fmtEnd);
- nc = *fmt++;
- if (nc=='!') {
- strcpy(tbuf, "!");
- } else {
- DCHECK_LT(fmt, fmtEnd);
- DCHECK_LT((unsigned)(nc-'0'), 4U);
- operand = lir->operands[nc-'0'];
- switch (*fmt++) {
- case 'H':
- if (operand != 0) {
- sprintf(tbuf, ", %s %d",shiftNames[operand & 0x3],
- operand >> 2);
- } else {
- strcpy(tbuf,"");
- }
- break;
- case 'B':
- switch (operand) {
- case kSY:
- name = "sy";
- break;
- case kST:
- name = "st";
- break;
- case kISH:
- name = "ish";
- break;
- case kISHST:
- name = "ishst";
- break;
- case kNSH:
- name = "nsh";
- break;
- case kNSHST:
- name = "shst";
- break;
- default:
- name = "DecodeError2";
- break;
- }
- strcpy(tbuf, name);
- break;
- case 'b':
- strcpy(tbuf,"0000");
- for (i=3; i>= 0; i--) {
- tbuf[i] += operand & 1;
- operand >>= 1;
- }
- break;
- case 'n':
- operand = ~expandImmediate(operand);
- sprintf(tbuf,"%d [%#x]", operand, operand);
- break;
- case 'm':
- operand = expandImmediate(operand);
- sprintf(tbuf,"%d [%#x]", operand, operand);
- break;
- case 's':
- sprintf(tbuf,"s%d",operand & FP_REG_MASK);
- break;
- case 'S':
- sprintf(tbuf,"d%d",(operand & FP_REG_MASK) >> 1);
- break;
- case 'h':
- sprintf(tbuf,"%04x", operand);
- break;
- case 'M':
- case 'd':
- sprintf(tbuf,"%d", operand);
- break;
- case 'C':
- sprintf(tbuf,"%s",coreRegNames[operand]);
- break;
- case 'E':
- sprintf(tbuf,"%d", operand*4);
- break;
- case 'F':
- sprintf(tbuf,"%d", operand*2);
- break;
- case 'c':
- strcpy(tbuf, ccNames[operand]);
- break;
- case 't':
- sprintf(tbuf,"0x%08x (L%p)",
- (int) baseAddr + lir->offset + 4 +
- (operand << 1),
- lir->target);
- break;
- case 'u': {
- int offset_1 = lir->operands[0];
- int offset_2 = NEXT_LIR(lir)->operands[0];
- intptr_t target =
- ((((intptr_t) baseAddr + lir->offset + 4) &
- ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
- 0xfffffffc;
- sprintf(tbuf, "%p", (void *) target);
- break;
- }
+ std::string buf;
+ int i;
+ const char* fmtEnd = &fmt[strlen(fmt)];
+ char tbuf[256];
+ const char* name;
+ char nc;
+ while (fmt < fmtEnd) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmtEnd);
+ nc = *fmt++;
+ if (nc=='!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT((unsigned)(nc-'0'), 4U);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'H':
+ if (operand != 0) {
+ sprintf(tbuf, ", %s %d",shiftNames[operand & 0x3], operand >> 2);
+ } else {
+ strcpy(tbuf,"");
+ }
+ break;
+ case 'B':
+ switch (operand) {
+ case kSY:
+ name = "sy";
+ break;
+ case kST:
+ name = "st";
+ break;
+ case kISH:
+ name = "ish";
+ break;
+ case kISHST:
+ name = "ishst";
+ break;
+ case kNSH:
+ name = "nsh";
+ break;
+ case kNSHST:
+ name = "shst";
+ break;
+ default:
+ name = "DecodeError2";
+ break;
+ }
+ strcpy(tbuf, name);
+ break;
+ case 'b':
+ strcpy(tbuf,"0000");
+ for (i=3; i>= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 'n':
+ operand = ~expandImmediate(operand);
+ sprintf(tbuf,"%d [%#x]", operand, operand);
+ break;
+ case 'm':
+ operand = expandImmediate(operand);
+ sprintf(tbuf,"%d [%#x]", operand, operand);
+ break;
+ case 's':
+ sprintf(tbuf,"s%d",operand & FP_REG_MASK);
+ break;
+ case 'S':
+ sprintf(tbuf,"d%d",(operand & FP_REG_MASK) >> 1);
+ break;
+ case 'h':
+ sprintf(tbuf,"%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ sprintf(tbuf,"%d", operand);
+ break;
+ case 'C':
+ sprintf(tbuf,"%s",coreRegNames[operand]);
+ break;
+ case 'E':
+ sprintf(tbuf,"%d", operand*4);
+ break;
+ case 'F':
+ sprintf(tbuf,"%d", operand*2);
+ break;
+ case 'c':
+ strcpy(tbuf, ccNames[operand]);
+ break;
+ case 't':
+ sprintf(tbuf,"0x%08x (L%p)",
+ (int) baseAddr + lir->offset + 4 +
+ (operand << 1),
+ lir->target);
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ intptr_t target =
+ ((((intptr_t) baseAddr + lir->offset + 4) &
+ ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
+ 0xfffffffc;
+ sprintf(tbuf, "%p", (void *) target);
+ break;
+ }
- /* Nothing to print for BLX_2 */
- case 'v':
- strcpy(tbuf, "see above");
- break;
- case 'R':
- decodeRegList(lir->opcode, operand, tbuf);
- break;
- case 'P':
- decodeFPCSRegList(operand, 16, tbuf);
- break;
- case 'Q':
- decodeFPCSRegList(operand, 0, tbuf);
- break;
- default:
- strcpy(tbuf,"DecodeError1");
- break;
- }
- buf += tbuf;
- }
- } else {
- buf += *fmt++;
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'R':
+ decodeRegList(lir->opcode, operand, tbuf);
+ break;
+ case 'P':
+ decodeFPCSRegList(operand, 16, tbuf);
+ break;
+ case 'Q':
+ decodeFPCSRegList(operand, 0, tbuf);
+ break;
+ default:
+ strcpy(tbuf,"DecodeError1");
+ break;
}
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
}
- return buf;
+ }
+ return buf;
}
void oatDumpResourceMask(LIR* lir, u8 mask, const char* prefix)
{
- char buf[256];
- buf[0] = 0;
- LIR* armLIR = (LIR*) lir;
+ char buf[256];
+ buf[0] = 0;
+ LIR* armLIR = (LIR*) lir;
- if (mask == ENCODE_ALL) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
- for (i = 0; i < kRegEnd; i++) {
- if (mask & (1ULL << i)) {
- sprintf(num, "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask & ENCODE_CCODE) {
- strcat(buf, "cc ");
- }
- if (mask & ENCODE_FP_STATUS) {
- strcat(buf, "fpcc ");
- }
-
- /* Memory bits */
- if (armLIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", armLIR->aliasInfo & 0xffff,
- (armLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
- }
- if (mask & ENCODE_LITERAL) {
- strcat(buf, "lit ");
- }
-
- if (mask & ENCODE_HEAP_REF) {
- strcat(buf, "heap ");
- }
- if (mask & ENCODE_MUST_NOT_ALIAS) {
- strcat(buf, "noalias ");
- }
+ for (i = 0; i < kRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
}
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
}
+ if (mask & ENCODE_FP_STATUS) {
+ strcat(buf, "fpcc ");
+ }
+
+ /* Memory bits */
+ if (armLIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", armLIR->aliasInfo & 0xffff,
+ (armLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
}
diff --git a/src/compiler/codegen/arm/ArmLIR.h b/src/compiler/codegen/arm/ArmLIR.h
index 484892a..fc5aa40 100644
--- a/src/compiler/codegen/arm/ArmLIR.h
+++ b/src/compiler/codegen/arm/ArmLIR.h
@@ -132,20 +132,20 @@
#define LOC_C_RETURN_WIDE_DOUBLE LOC_C_RETURN_WIDE
enum ResourceEncodingPos {
- kGPReg0 = 0,
- kRegSP = 13,
- kRegLR = 14,
- kRegPC = 15,
- kFPReg0 = 16,
- kFPReg16 = 32,
- kRegEnd = 48,
- kCCode = kRegEnd,
- kFPStatus, // FP status word
- // The following four bits are for memory disambiguation
- kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
- kLiteral, // 2 Literal pool (can be fully disambiguated)
- kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
- kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
+ kGPReg0 = 0,
+ kRegSP = 13,
+ kRegLR = 14,
+ kRegPC = 15,
+ kFPReg0 = 16,
+ kFPReg16 = 32,
+ kRegEnd = 48,
+ kCCode = kRegEnd,
+ kFPStatus, // FP status word
+ // The following four bits are for memory disambiguation
+ kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
+ kLiteral, // 2 Literal pool (can be fully disambiguated)
+ kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
+ kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
};
#define ENCODE_REG_LIST(N) ((u8) N)
@@ -164,7 +164,7 @@
#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
- ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+ ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & 0x80000000) ? 1 : 0)
@@ -176,73 +176,73 @@
* rPC, rFP, and rSELF are for architecture-independent code to use.
*/
enum NativeRegisterPool {
- r0 = 0,
- r1 = 1,
- r2 = 2,
- r3 = 3,
- rSUSPEND = 4,
- r5 = 5,
- r6 = 6,
- r7 = 7,
- r8 = 8,
- rSELF = 9,
- r10 = 10,
- r11 = 11,
- r12 = 12,
- r13sp = 13,
- rSP = 13,
- r14lr = 14,
- rLR = 14,
- r15pc = 15,
- rPC = 15,
- fr0 = 0 + FP_REG_OFFSET,
- fr1 = 1 + FP_REG_OFFSET,
- fr2 = 2 + FP_REG_OFFSET,
- fr3 = 3 + FP_REG_OFFSET,
- fr4 = 4 + FP_REG_OFFSET,
- fr5 = 5 + FP_REG_OFFSET,
- fr6 = 6 + FP_REG_OFFSET,
- fr7 = 7 + FP_REG_OFFSET,
- fr8 = 8 + FP_REG_OFFSET,
- fr9 = 9 + FP_REG_OFFSET,
- fr10 = 10 + FP_REG_OFFSET,
- fr11 = 11 + FP_REG_OFFSET,
- fr12 = 12 + FP_REG_OFFSET,
- fr13 = 13 + FP_REG_OFFSET,
- fr14 = 14 + FP_REG_OFFSET,
- fr15 = 15 + FP_REG_OFFSET,
- fr16 = 16 + FP_REG_OFFSET,
- fr17 = 17 + FP_REG_OFFSET,
- fr18 = 18 + FP_REG_OFFSET,
- fr19 = 19 + FP_REG_OFFSET,
- fr20 = 20 + FP_REG_OFFSET,
- fr21 = 21 + FP_REG_OFFSET,
- fr22 = 22 + FP_REG_OFFSET,
- fr23 = 23 + FP_REG_OFFSET,
- fr24 = 24 + FP_REG_OFFSET,
- fr25 = 25 + FP_REG_OFFSET,
- fr26 = 26 + FP_REG_OFFSET,
- fr27 = 27 + FP_REG_OFFSET,
- fr28 = 28 + FP_REG_OFFSET,
- fr29 = 29 + FP_REG_OFFSET,
- fr30 = 30 + FP_REG_OFFSET,
- fr31 = 31 + FP_REG_OFFSET,
- dr0 = fr0 + FP_DOUBLE,
- dr1 = fr2 + FP_DOUBLE,
- dr2 = fr4 + FP_DOUBLE,
- dr3 = fr6 + FP_DOUBLE,
- dr4 = fr8 + FP_DOUBLE,
- dr5 = fr10 + FP_DOUBLE,
- dr6 = fr12 + FP_DOUBLE,
- dr7 = fr14 + FP_DOUBLE,
- dr8 = fr16 + FP_DOUBLE,
- dr9 = fr18 + FP_DOUBLE,
- dr10 = fr20 + FP_DOUBLE,
- dr11 = fr22 + FP_DOUBLE,
- dr12 = fr24 + FP_DOUBLE,
- dr13 = fr26 + FP_DOUBLE,
- dr14 = fr28 + FP_DOUBLE,
- dr15 = fr30 + FP_DOUBLE,
+ r0 = 0,
+ r1 = 1,
+ r2 = 2,
+ r3 = 3,
+ rSUSPEND = 4,
+ r5 = 5,
+ r6 = 6,
+ r7 = 7,
+ r8 = 8,
+ rSELF = 9,
+ r10 = 10,
+ r11 = 11,
+ r12 = 12,
+ r13sp = 13,
+ rSP = 13,
+ r14lr = 14,
+ rLR = 14,
+ r15pc = 15,
+ rPC = 15,
+ fr0 = 0 + FP_REG_OFFSET,
+ fr1 = 1 + FP_REG_OFFSET,
+ fr2 = 2 + FP_REG_OFFSET,
+ fr3 = 3 + FP_REG_OFFSET,
+ fr4 = 4 + FP_REG_OFFSET,
+ fr5 = 5 + FP_REG_OFFSET,
+ fr6 = 6 + FP_REG_OFFSET,
+ fr7 = 7 + FP_REG_OFFSET,
+ fr8 = 8 + FP_REG_OFFSET,
+ fr9 = 9 + FP_REG_OFFSET,
+ fr10 = 10 + FP_REG_OFFSET,
+ fr11 = 11 + FP_REG_OFFSET,
+ fr12 = 12 + FP_REG_OFFSET,
+ fr13 = 13 + FP_REG_OFFSET,
+ fr14 = 14 + FP_REG_OFFSET,
+ fr15 = 15 + FP_REG_OFFSET,
+ fr16 = 16 + FP_REG_OFFSET,
+ fr17 = 17 + FP_REG_OFFSET,
+ fr18 = 18 + FP_REG_OFFSET,
+ fr19 = 19 + FP_REG_OFFSET,
+ fr20 = 20 + FP_REG_OFFSET,
+ fr21 = 21 + FP_REG_OFFSET,
+ fr22 = 22 + FP_REG_OFFSET,
+ fr23 = 23 + FP_REG_OFFSET,
+ fr24 = 24 + FP_REG_OFFSET,
+ fr25 = 25 + FP_REG_OFFSET,
+ fr26 = 26 + FP_REG_OFFSET,
+ fr27 = 27 + FP_REG_OFFSET,
+ fr28 = 28 + FP_REG_OFFSET,
+ fr29 = 29 + FP_REG_OFFSET,
+ fr30 = 30 + FP_REG_OFFSET,
+ fr31 = 31 + FP_REG_OFFSET,
+ dr0 = fr0 + FP_DOUBLE,
+ dr1 = fr2 + FP_DOUBLE,
+ dr2 = fr4 + FP_DOUBLE,
+ dr3 = fr6 + FP_DOUBLE,
+ dr4 = fr8 + FP_DOUBLE,
+ dr5 = fr10 + FP_DOUBLE,
+ dr6 = fr12 + FP_DOUBLE,
+ dr7 = fr14 + FP_DOUBLE,
+ dr8 = fr16 + FP_DOUBLE,
+ dr9 = fr18 + FP_DOUBLE,
+ dr10 = fr20 + FP_DOUBLE,
+ dr11 = fr22 + FP_DOUBLE,
+ dr12 = fr24 + FP_DOUBLE,
+ dr13 = fr26 + FP_DOUBLE,
+ dr14 = fr28 + FP_DOUBLE,
+ dr15 = fr30 + FP_DOUBLE,
};
/* Target-independent aliases */
@@ -256,30 +256,30 @@
/* Shift encodings */
enum ArmShiftEncodings {
- kArmLsl = 0x0,
- kArmLsr = 0x1,
- kArmAsr = 0x2,
- kArmRor = 0x3
+ kArmLsl = 0x0,
+ kArmLsr = 0x1,
+ kArmAsr = 0x2,
+ kArmRor = 0x3
};
/* Thumb condition encodings */
enum ArmConditionCode {
- kArmCondEq = 0x0, /* 0000 */
- kArmCondNe = 0x1, /* 0001 */
- kArmCondCs = 0x2, /* 0010 */
- kArmCondCc = 0x3, /* 0011 */
- kArmCondMi = 0x4, /* 0100 */
- kArmCondPl = 0x5, /* 0101 */
- kArmCondVs = 0x6, /* 0110 */
- kArmCondVc = 0x7, /* 0111 */
- kArmCondHi = 0x8, /* 1000 */
- kArmCondLs = 0x9, /* 1001 */
- kArmCondGe = 0xa, /* 1010 */
- kArmCondLt = 0xb, /* 1011 */
- kArmCondGt = 0xc, /* 1100 */
- kArmCondLe = 0xd, /* 1101 */
- kArmCondAl = 0xe, /* 1110 */
- kArmCondNv = 0xf, /* 1111 */
+ kArmCondEq = 0x0, /* 0000 */
+ kArmCondNe = 0x1, /* 0001 */
+ kArmCondCs = 0x2, /* 0010 */
+ kArmCondCc = 0x3, /* 0011 */
+ kArmCondMi = 0x4, /* 0100 */
+ kArmCondPl = 0x5, /* 0101 */
+ kArmCondVs = 0x6, /* 0110 */
+ kArmCondVc = 0x7, /* 0111 */
+ kArmCondHi = 0x8, /* 1000 */
+ kArmCondLs = 0x9, /* 1001 */
+ kArmCondGe = 0xa, /* 1010 */
+ kArmCondLt = 0xb, /* 1011 */
+ kArmCondGt = 0xc, /* 1100 */
+ kArmCondLe = 0xd, /* 1101 */
+ kArmCondAl = 0xe, /* 1110 */
+ kArmCondNv = 0xf, /* 1111 */
};
#define isPseudoOpcode(opcode) ((int)(opcode) < 0)
@@ -290,383 +290,383 @@
* Assemble.cc.
*/
enum ArmOpcode {
- kPseudoIntrinsicRetry = -16,
- kPseudoSuspendTarget = -15,
- kPseudoThrowTarget = -14,
- kPseudoCaseLabel = -13,
- kPseudoMethodEntry = -12,
- kPseudoMethodExit = -11,
- kPseudoBarrier = -10,
- kPseudoExtended = -9,
- kPseudoSSARep = -8,
- kPseudoEntryBlock = -7,
- kPseudoExitBlock = -6,
- kPseudoTargetLabel = -5,
- kPseudoDalvikByteCodeBoundary = -4,
- kPseudoPseudoAlign4 = -3,
- kPseudoEHBlockLabel = -2,
- kPseudoNormalBlockLabel = -1,
- /************************************************************************/
- kArm16BitData, /* DATA [0] rd[15..0] */
- kThumbAdcRR, /* adc [0100000101] rm[5..3] rd[2..0] */
- kThumbAddRRI3, /* add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/
- kThumbAddRI8, /* add(2) [00110] rd[10..8] imm_8[7..0] */
- kThumbAddRRR, /* add(3) [0001100] rm[8..6] rn[5..3] rd[2..0] */
- kThumbAddRRLH, /* add(4) [01000100] H12[01] rm[5..3] rd[2..0] */
- kThumbAddRRHL, /* add(4) [01001000] H12[10] rm[5..3] rd[2..0] */
- kThumbAddRRHH, /* add(4) [01001100] H12[11] rm[5..3] rd[2..0] */
- kThumbAddPcRel, /* add(5) [10100] rd[10..8] imm_8[7..0] */
- kThumbAddSpRel, /* add(6) [10101] rd[10..8] imm_8[7..0] */
- kThumbAddSpI7, /* add(7) [101100000] imm_7[6..0] */
- kThumbAndRR, /* and [0100000000] rm[5..3] rd[2..0] */
- kThumbAsrRRI5, /* asr(1) [00010] imm_5[10..6] rm[5..3] rd[2..0] */
- kThumbAsrRR, /* asr(2) [0100000100] rs[5..3] rd[2..0] */
- kThumbBCond, /* b(1) [1101] cond[11..8] offset_8[7..0] */
- kThumbBUncond, /* b(2) [11100] offset_11[10..0] */
- kThumbBicRR, /* bic [0100001110] rm[5..3] rd[2..0] */
- kThumbBkpt, /* bkpt [10111110] imm_8[7..0] */
- kThumbBlx1, /* blx(1) [111] H[10] offset_11[10..0] */
- kThumbBlx2, /* blx(1) [111] H[01] offset_11[10..0] */
- kThumbBl1, /* blx(1) [111] H[10] offset_11[10..0] */
- kThumbBl2, /* blx(1) [111] H[11] offset_11[10..0] */
- kThumbBlxR, /* blx(2) [010001111] rm[6..3] [000] */
- kThumbBx, /* bx [010001110] H2[6..6] rm[5..3] SBZ[000] */
- kThumbCmnRR, /* cmn [0100001011] rm[5..3] rd[2..0] */
- kThumbCmpRI8, /* cmp(1) [00101] rn[10..8] imm_8[7..0] */
- kThumbCmpRR, /* cmp(2) [0100001010] rm[5..3] rd[2..0] */
- kThumbCmpLH, /* cmp(3) [01000101] H12[01] rm[5..3] rd[2..0] */
- kThumbCmpHL, /* cmp(3) [01000110] H12[10] rm[5..3] rd[2..0] */
- kThumbCmpHH, /* cmp(3) [01000111] H12[11] rm[5..3] rd[2..0] */
- kThumbEorRR, /* eor [0100000001] rm[5..3] rd[2..0] */
- kThumbLdmia, /* ldmia [11001] rn[10..8] reglist [7..0] */
- kThumbLdrRRI5, /* ldr(1) [01101] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbLdrRRR, /* ldr(2) [0101100] rm[8..6] rn[5..3] rd[2..0] */
- kThumbLdrPcRel, /* ldr(3) [01001] rd[10..8] imm_8[7..0] */
- kThumbLdrSpRel, /* ldr(4) [10011] rd[10..8] imm_8[7..0] */
- kThumbLdrbRRI5, /* ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbLdrbRRR, /* ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0] */
- kThumbLdrhRRI5, /* ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbLdrhRRR, /* ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0] */
- kThumbLdrsbRRR, /* ldrsb [0101011] rm[8..6] rn[5..3] rd[2..0] */
- kThumbLdrshRRR, /* ldrsh [0101111] rm[8..6] rn[5..3] rd[2..0] */
- kThumbLslRRI5, /* lsl(1) [00000] imm_5[10..6] rm[5..3] rd[2..0] */
- kThumbLslRR, /* lsl(2) [0100000010] rs[5..3] rd[2..0] */
- kThumbLsrRRI5, /* lsr(1) [00001] imm_5[10..6] rm[5..3] rd[2..0] */
- kThumbLsrRR, /* lsr(2) [0100000011] rs[5..3] rd[2..0] */
- kThumbMovImm, /* mov(1) [00100] rd[10..8] imm_8[7..0] */
- kThumbMovRR, /* mov(2) [0001110000] rn[5..3] rd[2..0] */
- kThumbMovRR_H2H, /* mov(3) [01000111] H12[11] rm[5..3] rd[2..0] */
- kThumbMovRR_H2L, /* mov(3) [01000110] H12[01] rm[5..3] rd[2..0] */
- kThumbMovRR_L2H, /* mov(3) [01000101] H12[10] rm[5..3] rd[2..0] */
- kThumbMul, /* mul [0100001101] rm[5..3] rd[2..0] */
- kThumbMvn, /* mvn [0100001111] rm[5..3] rd[2..0] */
- kThumbNeg, /* neg [0100001001] rm[5..3] rd[2..0] */
- kThumbOrr, /* orr [0100001100] rm[5..3] rd[2..0] */
- kThumbPop, /* pop [1011110] r[8..8] rl[7..0] */
- kThumbPush, /* push [1011010] r[8..8] rl[7..0] */
- kThumbRorRR, /* ror [0100000111] rs[5..3] rd[2..0] */
- kThumbSbc, /* sbc [0100000110] rm[5..3] rd[2..0] */
- kThumbStmia, /* stmia [11000] rn[10..8] reglist [7.. 0] */
- kThumbStrRRI5, /* str(1) [01100] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbStrRRR, /* str(2) [0101000] rm[8..6] rn[5..3] rd[2..0] */
- kThumbStrSpRel, /* str(3) [10010] rd[10..8] imm_8[7..0] */
- kThumbStrbRRI5, /* strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbStrbRRR, /* strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0] */
- kThumbStrhRRI5, /* strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0] */
- kThumbStrhRRR, /* strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0] */
- kThumbSubRRI3, /* sub(1) [0001111] imm_3[8..6] rn[5..3] rd[2..0]*/
- kThumbSubRI8, /* sub(2) [00111] rd[10..8] imm_8[7..0] */
- kThumbSubRRR, /* sub(3) [0001101] rm[8..6] rn[5..3] rd[2..0] */
- kThumbSubSpI7, /* sub(4) [101100001] imm_7[6..0] */
- kThumbSwi, /* swi [11011111] imm_8[7..0] */
- kThumbTst, /* tst [0100001000] rm[5..3] rn[2..0] */
- kThumb2Vldrs, /* vldr low sx [111011011001] rn[19..16] rd[15-12]
- [1010] imm_8[7..0] */
- kThumb2Vldrd, /* vldr low dx [111011011001] rn[19..16] rd[15-12]
- [1011] imm_8[7..0] */
- kThumb2Vmuls, /* vmul vd, vn, vm [111011100010] rn[19..16]
- rd[15-12] [10100000] rm[3..0] */
- kThumb2Vmuld, /* vmul vd, vn, vm [111011100010] rn[19..16]
- rd[15-12] [10110000] rm[3..0] */
- kThumb2Vstrs, /* vstr low sx [111011011000] rn[19..16] rd[15-12]
- [1010] imm_8[7..0] */
- kThumb2Vstrd, /* vstr low dx [111011011000] rn[19..16] rd[15-12]
- [1011] imm_8[7..0] */
- kThumb2Vsubs, /* vsub vd, vn, vm [111011100011] rn[19..16]
- rd[15-12] [10100040] rm[3..0] */
- kThumb2Vsubd, /* vsub vd, vn, vm [111011100011] rn[19..16]
- rd[15-12] [10110040] rm[3..0] */
- kThumb2Vadds, /* vadd vd, vn, vm [111011100011] rn[19..16]
- rd[15-12] [10100000] rm[3..0] */
- kThumb2Vaddd, /* vadd vd, vn, vm [111011100011] rn[19..16]
- rd[15-12] [10110000] rm[3..0] */
- kThumb2Vdivs, /* vdiv vd, vn, vm [111011101000] rn[19..16]
- rd[15-12] [10100000] rm[3..0] */
- kThumb2Vdivd, /* vdiv vd, vn, vm [111011101000] rn[19..16]
- rd[15-12] [10110000] rm[3..0] */
- kThumb2VcvtIF, /* vcvt.F32 vd, vm [1110111010111000] vd[15..12]
- [10101100] vm[3..0] */
- kThumb2VcvtID, /* vcvt.F64 vd, vm [1110111010111000] vd[15..12]
- [10111100] vm[3..0] */
- kThumb2VcvtFI, /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
- [10101100] vm[3..0] */
- kThumb2VcvtDI, /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
- [10111100] vm[3..0] */
- kThumb2VcvtFd, /* vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12]
- [10101100] vm[3..0] */
- kThumb2VcvtDF, /* vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12]
- [10111100] vm[3..0] */
- kThumb2Vsqrts, /* vsqrt.f32 vd, vm [1110111010110001] vd[15..12]
- [10101100] vm[3..0] */
- kThumb2Vsqrtd, /* vsqrt.f64 vd, vm [1110111010110001] vd[15..12]
- [10111100] vm[3..0] */
- kThumb2MovImmShift, /* mov(T2) rd, #<const> [11110] i [00001001111]
- imm3 rd[11..8] imm8 */
- kThumb2MovImm16, /* mov(T3) rd, #<const> [11110] i [0010100] imm4 [0]
- imm3 rd[11..8] imm8 */
- kThumb2StrRRI12, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
- rn[19..16] rt[15..12] imm12[11..0] */
- kThumb2LdrRRI12, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
- rn[19..16] rt[15..12] imm12[11..0] */
- kThumb2StrRRI8Predec, /* str(Imm,T4) rd,[rn,#-imm8] [111110000100]
- rn[19..16] rt[15..12] [1100] imm[7..0]*/
- kThumb2LdrRRI8Predec, /* ldr(Imm,T4) rd,[rn,#-imm8] [111110000101]
- rn[19..16] rt[15..12] [1100] imm[7..0]*/
- kThumb2Cbnz, /* cbnz rd,<label> [101110] i [1] imm5[7..3]
- rn[2..0] */
- kThumb2Cbz, /* cbn rd,<label> [101100] i [1] imm5[7..3]
- rn[2..0] */
- kThumb2AddRRI12, /* add rd, rn, #imm12 [11110] i [100000] rn[19..16]
- [0] imm3[14..12] rd[11..8] imm8[7..0] */
- kThumb2MovRR, /* mov rd, rm [11101010010011110000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2Vmovs, /* vmov.f32 vd, vm [111011101] D [110000]
- vd[15..12] 101001] M [0] vm[3..0] */
- kThumb2Vmovd, /* vmov.f64 vd, vm [111011101] D [110000]
- vd[15..12] 101101] M [0] vm[3..0] */
- kThumb2Ldmia, /* ldmia [111010001001[ rn[19..16] mask[15..0] */
- kThumb2Stmia, /* stmia [111010001000[ rn[19..16] mask[15..0] */
- kThumb2AddRRR, /* add [111010110000] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2SubRRR, /* sub [111010111010] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2SbcRRR, /* sbc [111010110110] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2CmpRR, /* cmp [111010111011] rn[19..16] [0000] [1111]
- [0000] rm[3..0] */
- kThumb2SubRRI12, /* sub rd, rn, #imm12 [11110] i [01010] rn[19..16]
- [0] imm3[14..12] rd[11..8] imm8[7..0] */
- kThumb2MvnImm12, /* mov(T2) rd, #<const> [11110] i [00011011110]
- imm3 rd[11..8] imm8 */
- kThumb2Sel, /* sel rd, rn, rm [111110101010] rn[19-16] rd[11-8]
- rm[3-0] */
- kThumb2Ubfx, /* ubfx rd,rn,#lsb,#width [111100111100] rn[19..16]
- [0] imm3[14-12] rd[11-8] w[4-0] */
- kThumb2Sbfx, /* ubfx rd,rn,#lsb,#width [111100110100] rn[19..16]
- [0] imm3[14-12] rd[11-8] w[4-0] */
- kThumb2LdrRRR, /* ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2LdrhRRR, /* ldrh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2LdrshRRR, /* ldrsh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2LdrbRRR, /* ldrb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2LdrsbRRR, /* ldrsb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2StrRRR, /* str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2StrhRRR, /* str rt,[rn,rm,LSL #imm] [111110000010] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2StrbRRR, /* str rt,[rn,rm,LSL #imm] [111110000000] rn[19-16]
- rt[15-12] [000000] imm[5-4] rm[3-0] */
- kThumb2LdrhRRI12, /* ldrh rt,[rn,#imm12] [111110001011]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2LdrshRRI12, /* ldrsh rt,[rn,#imm12] [111110011011]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2LdrbRRI12, /* ldrb rt,[rn,#imm12] [111110001001]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2LdrsbRRI12, /* ldrsb rt,[rn,#imm12] [111110011001]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2StrhRRI12, /* strh rt,[rn,#imm12] [111110001010]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2StrbRRI12, /* strb rt,[rn,#imm12] [111110001000]
- rt[15..12] rn[19..16] imm12[11..0] */
- kThumb2Pop, /* pop [1110100010111101] list[15-0]*/
- kThumb2Push, /* push [1110100100101101] list[15-0]*/
- kThumb2CmpRI8, /* cmp rn, #<const> [11110] i [011011] rn[19-16] [0]
- imm3 [1111] imm8[7..0] */
- kThumb2AdcRRR, /* adc [111010110101] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2AndRRR, /* and [111010100000] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2BicRRR, /* bic [111010100010] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2CmnRR, /* cmn [111010110001] rn[19..16] [0000] [1111]
- [0000] rm[3..0] */
- kThumb2EorRRR, /* eor [111010101000] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2MulRRR, /* mul [111110110000] rn[19..16] [1111] rd[11..8]
- [0000] rm[3..0] */
- kThumb2MnvRR, /* mvn [11101010011011110] rd[11-8] [0000]
- rm[3..0] */
- kThumb2RsubRRI8, /* rsub [111100011100] rn[19..16] [0000] rd[11..8]
- imm8[7..0] */
- kThumb2NegRR, /* actually rsub rd, rn, #0 */
- kThumb2OrrRRR, /* orr [111010100100] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2TstRR, /* tst [111010100001] rn[19..16] [0000] [1111]
- [0000] rm[3..0] */
- kThumb2LslRRR, /* lsl [111110100000] rn[19..16] [1111] rd[11..8]
- [0000] rm[3..0] */
- kThumb2LsrRRR, /* lsr [111110100010] rn[19..16] [1111] rd[11..8]
- [0000] rm[3..0] */
- kThumb2AsrRRR, /* asr [111110100100] rn[19..16] [1111] rd[11..8]
- [0000] rm[3..0] */
- kThumb2RorRRR, /* ror [111110100110] rn[19..16] [1111] rd[11..8]
- [0000] rm[3..0] */
- kThumb2LslRRI5, /* lsl [11101010010011110] imm[14.12] rd[11..8]
- [00] rm[3..0] */
- kThumb2LsrRRI5, /* lsr [11101010010011110] imm[14.12] rd[11..8]
- [01] rm[3..0] */
- kThumb2AsrRRI5, /* asr [11101010010011110] imm[14.12] rd[11..8]
- [10] rm[3..0] */
- kThumb2RorRRI5, /* ror [11101010010011110] imm[14.12] rd[11..8]
- [11] rm[3..0] */
- kThumb2BicRRI8, /* bic [111100000010] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2AndRRI8, /* bic [111100000000] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2OrrRRI8, /* orr [111100000100] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2EorRRI8, /* eor [111100001000] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2AddRRI8, /* add [111100001000] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2AdcRRI8, /* adc [111100010101] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2SubRRI8, /* sub [111100011011] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2SbcRRI8, /* sbc [111100010111] rn[19..16] [0] imm3
- rd[11..8] imm8 */
- kThumb2It, /* it [10111111] firstcond[7-4] mask[3-0] */
- kThumb2Fmstat, /* fmstat [11101110111100011111101000010000] */
- kThumb2Vcmpd, /* vcmp [111011101] D [11011] rd[15-12] [1011]
- E [1] M [0] rm[3-0] */
- kThumb2Vcmps, /* vcmp [111011101] D [11010] rd[15-12] [1011]
- E [1] M [0] rm[3-0] */
- kThumb2LdrPcRel12, /* ldr rd,[pc,#imm12] [1111100011011111] rt[15-12]
- imm12[11-0] */
- kThumb2BCond, /* b<c> [1110] S cond[25-22] imm6[21-16] [10]
- J1 [0] J2 imm11[10..0] */
- kThumb2Vmovd_RR, /* vmov [111011101] D [110000] vd[15-12 [101101]
- M [0] vm[3-0] */
- kThumb2Vmovs_RR, /* vmov [111011101] D [110000] vd[15-12 [101001]
- M [0] vm[3-0] */
- kThumb2Fmrs, /* vmov [111011100000] vn[19-16] rt[15-12] [1010]
- N [0010000] */
- kThumb2Fmsr, /* vmov [111011100001] vn[19-16] rt[15-12] [1010]
- N [0010000] */
- kThumb2Fmrrd, /* vmov [111011000100] rt2[19-16] rt[15-12]
- [101100] M [1] vm[3-0] */
- kThumb2Fmdrr, /* vmov [111011000101] rt2[19-16] rt[15-12]
- [101100] M [1] vm[3-0] */
- kThumb2Vabsd, /* vabs.f64 [111011101] D [110000] rd[15-12]
- [1011110] M [0] vm[3-0] */
- kThumb2Vabss, /* vabs.f32 [111011101] D [110000] rd[15-12]
- [1010110] M [0] vm[3-0] */
- kThumb2Vnegd, /* vneg.f64 [111011101] D [110000] rd[15-12]
- [1011110] M [0] vm[3-0] */
- kThumb2Vnegs, /* vneg.f32 [111011101] D [110000] rd[15-12]
- [1010110] M [0] vm[3-0] */
- kThumb2Vmovs_IMM8, /* vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12]
- [10100000] imm4l[3-0] */
- kThumb2Vmovd_IMM8, /* vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12]
- [10110000] imm4l[3-0] */
- kThumb2Mla, /* mla [111110110000] rn[19-16] ra[15-12] rd[7-4]
- [0000] rm[3-0] */
- kThumb2Umull, /* umull [111110111010] rn[19-16], rdlo[15-12]
- rdhi[11-8] [0000] rm[3-0] */
- kThumb2Ldrex, /* ldrex [111010000101] rn[19-16] rt[11-8] [1111]
- imm8[7-0] */
- kThumb2Strex, /* strex [111010000100] rn[19-16] rt[11-8] rd[11-8]
- imm8[7-0] */
- kThumb2Clrex, /* clrex [111100111011111110000111100101111] */
- kThumb2Bfi, /* bfi [111100110110] rn[19-16] [0] imm3[14-12]
- rd[11-8] imm2[7-6] [0] msb[4-0] */
- kThumb2Bfc, /* bfc [11110011011011110] [0] imm3[14-12]
- rd[11-8] imm2[7-6] [0] msb[4-0] */
- kThumb2Dmb, /* dmb [1111001110111111100011110101] option[3-0] */
- kThumb2LdrPcReln12, /* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
- imm12[11-0] */
- kThumb2Stm, /* stm <list> [111010010000] rn[19-16] 000 rl[12-0] */
- kThumbUndefined, /* undefined [11011110xxxxxxxx] */
- kThumb2VPopCS, /* vpop <list of callee save fp singles (s16+) */
- kThumb2VPushCS, /* vpush <list callee save fp singles (s16+) */
- kThumb2Vldms, /* vldms rd, <list> */
- kThumb2Vstms, /* vstms rd, <list> */
- kThumb2BUncond, /* b <label> */
- kThumb2MovImm16H, /* similar to kThumb2MovImm16, but target high hw */
- kThumb2AddPCR, /* Thumb2 2-operand add with hard-coded PC target */
- kThumb2Adr, /* Special purpose encoding of ADR for switch tables */
- kThumb2MovImm16LST, /* Special purpose version for switch table use */
- kThumb2MovImm16HST, /* Special purpose version for switch table use */
- kThumb2LdmiaWB, /* ldmia [111010011001[ rn[19..16] mask[15..0] */
- kThumb2SubsRRI12, /* setflags encoding */
- kThumb2OrrRRRs, /* orrx [111010100101] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2Push1, /* t3 encoding of push */
- kThumb2Pop1, /* t3 encoding of pop */
- kThumb2RsubRRR, /* rsb [111010111101] rn[19..16] [0000] rd[11..8]
- [0000] rm[3..0] */
- kThumb2Smull, /* smull [111110111000] rn[19-16], rdlo[15-12]
- rdhi[11-8] [0000] rm[3-0] */
- kArmLast,
+ kPseudoIntrinsicRetry = -16,
+ kPseudoSuspendTarget = -15,
+ kPseudoThrowTarget = -14,
+ kPseudoCaseLabel = -13,
+ kPseudoMethodEntry = -12,
+ kPseudoMethodExit = -11,
+ kPseudoBarrier = -10,
+ kPseudoExtended = -9,
+ kPseudoSSARep = -8,
+ kPseudoEntryBlock = -7,
+ kPseudoExitBlock = -6,
+ kPseudoTargetLabel = -5,
+ kPseudoDalvikByteCodeBoundary = -4,
+ kPseudoPseudoAlign4 = -3,
+ kPseudoEHBlockLabel = -2,
+ kPseudoNormalBlockLabel = -1,
+ /************************************************************************/
+ kArm16BitData, /* DATA [0] rd[15..0] */
+ kThumbAdcRR, /* adc [0100000101] rm[5..3] rd[2..0] */
+ kThumbAddRRI3, /* add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/
+ kThumbAddRI8, /* add(2) [00110] rd[10..8] imm_8[7..0] */
+ kThumbAddRRR, /* add(3) [0001100] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbAddRRLH, /* add(4) [01000100] H12[01] rm[5..3] rd[2..0] */
+ kThumbAddRRHL, /* add(4) [01001000] H12[10] rm[5..3] rd[2..0] */
+ kThumbAddRRHH, /* add(4) [01001100] H12[11] rm[5..3] rd[2..0] */
+ kThumbAddPcRel, /* add(5) [10100] rd[10..8] imm_8[7..0] */
+ kThumbAddSpRel, /* add(6) [10101] rd[10..8] imm_8[7..0] */
+ kThumbAddSpI7, /* add(7) [101100000] imm_7[6..0] */
+ kThumbAndRR, /* and [0100000000] rm[5..3] rd[2..0] */
+ kThumbAsrRRI5, /* asr(1) [00010] imm_5[10..6] rm[5..3] rd[2..0] */
+ kThumbAsrRR, /* asr(2) [0100000100] rs[5..3] rd[2..0] */
+ kThumbBCond, /* b(1) [1101] cond[11..8] offset_8[7..0] */
+ kThumbBUncond, /* b(2) [11100] offset_11[10..0] */
+ kThumbBicRR, /* bic [0100001110] rm[5..3] rd[2..0] */
+ kThumbBkpt, /* bkpt [10111110] imm_8[7..0] */
+ kThumbBlx1, /* blx(1) [111] H[10] offset_11[10..0] */
+ kThumbBlx2, /* blx(1) [111] H[01] offset_11[10..0] */
+ kThumbBl1, /* blx(1) [111] H[10] offset_11[10..0] */
+ kThumbBl2, /* blx(1) [111] H[11] offset_11[10..0] */
+ kThumbBlxR, /* blx(2) [010001111] rm[6..3] [000] */
+ kThumbBx, /* bx [010001110] H2[6..6] rm[5..3] SBZ[000] */
+ kThumbCmnRR, /* cmn [0100001011] rm[5..3] rd[2..0] */
+ kThumbCmpRI8, /* cmp(1) [00101] rn[10..8] imm_8[7..0] */
+ kThumbCmpRR, /* cmp(2) [0100001010] rm[5..3] rd[2..0] */
+ kThumbCmpLH, /* cmp(3) [01000101] H12[01] rm[5..3] rd[2..0] */
+ kThumbCmpHL, /* cmp(3) [01000110] H12[10] rm[5..3] rd[2..0] */
+ kThumbCmpHH, /* cmp(3) [01000111] H12[11] rm[5..3] rd[2..0] */
+ kThumbEorRR, /* eor [0100000001] rm[5..3] rd[2..0] */
+ kThumbLdmia, /* ldmia [11001] rn[10..8] reglist [7..0] */
+ kThumbLdrRRI5, /* ldr(1) [01101] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbLdrRRR, /* ldr(2) [0101100] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbLdrPcRel, /* ldr(3) [01001] rd[10..8] imm_8[7..0] */
+ kThumbLdrSpRel, /* ldr(4) [10011] rd[10..8] imm_8[7..0] */
+ kThumbLdrbRRI5, /* ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbLdrbRRR, /* ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbLdrhRRI5, /* ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbLdrhRRR, /* ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbLdrsbRRR, /* ldrsb [0101011] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbLdrshRRR, /* ldrsh [0101111] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbLslRRI5, /* lsl(1) [00000] imm_5[10..6] rm[5..3] rd[2..0] */
+ kThumbLslRR, /* lsl(2) [0100000010] rs[5..3] rd[2..0] */
+ kThumbLsrRRI5, /* lsr(1) [00001] imm_5[10..6] rm[5..3] rd[2..0] */
+ kThumbLsrRR, /* lsr(2) [0100000011] rs[5..3] rd[2..0] */
+ kThumbMovImm, /* mov(1) [00100] rd[10..8] imm_8[7..0] */
+ kThumbMovRR, /* mov(2) [0001110000] rn[5..3] rd[2..0] */
+ kThumbMovRR_H2H, /* mov(3) [01000111] H12[11] rm[5..3] rd[2..0] */
+ kThumbMovRR_H2L, /* mov(3) [01000110] H12[01] rm[5..3] rd[2..0] */
+ kThumbMovRR_L2H, /* mov(3) [01000101] H12[10] rm[5..3] rd[2..0] */
+ kThumbMul, /* mul [0100001101] rm[5..3] rd[2..0] */
+ kThumbMvn, /* mvn [0100001111] rm[5..3] rd[2..0] */
+ kThumbNeg, /* neg [0100001001] rm[5..3] rd[2..0] */
+ kThumbOrr, /* orr [0100001100] rm[5..3] rd[2..0] */
+ kThumbPop, /* pop [1011110] r[8..8] rl[7..0] */
+ kThumbPush, /* push [1011010] r[8..8] rl[7..0] */
+ kThumbRorRR, /* ror [0100000111] rs[5..3] rd[2..0] */
+ kThumbSbc, /* sbc [0100000110] rm[5..3] rd[2..0] */
+ kThumbStmia, /* stmia [11000] rn[10..8] reglist [7.. 0] */
+ kThumbStrRRI5, /* str(1) [01100] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbStrRRR, /* str(2) [0101000] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbStrSpRel, /* str(3) [10010] rd[10..8] imm_8[7..0] */
+ kThumbStrbRRI5, /* strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbStrbRRR, /* strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbStrhRRI5, /* strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0] */
+ kThumbStrhRRR, /* strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbSubRRI3, /* sub(1) [0001111] imm_3[8..6] rn[5..3] rd[2..0]*/
+ kThumbSubRI8, /* sub(2) [00111] rd[10..8] imm_8[7..0] */
+ kThumbSubRRR, /* sub(3) [0001101] rm[8..6] rn[5..3] rd[2..0] */
+ kThumbSubSpI7, /* sub(4) [101100001] imm_7[6..0] */
+ kThumbSwi, /* swi [11011111] imm_8[7..0] */
+ kThumbTst, /* tst [0100001000] rm[5..3] rn[2..0] */
+ kThumb2Vldrs, /* vldr low sx [111011011001] rn[19..16] rd[15-12]
+ [1010] imm_8[7..0] */
+ kThumb2Vldrd, /* vldr low dx [111011011001] rn[19..16] rd[15-12]
+ [1011] imm_8[7..0] */
+ kThumb2Vmuls, /* vmul vd, vn, vm [111011100010] rn[19..16]
+ rd[15-12] [10100000] rm[3..0] */
+ kThumb2Vmuld, /* vmul vd, vn, vm [111011100010] rn[19..16]
+ rd[15-12] [10110000] rm[3..0] */
+ kThumb2Vstrs, /* vstr low sx [111011011000] rn[19..16] rd[15-12]
+ [1010] imm_8[7..0] */
+ kThumb2Vstrd, /* vstr low dx [111011011000] rn[19..16] rd[15-12]
+ [1011] imm_8[7..0] */
+ kThumb2Vsubs, /* vsub vd, vn, vm [111011100011] rn[19..16]
+ rd[15-12] [10100040] rm[3..0] */
+ kThumb2Vsubd, /* vsub vd, vn, vm [111011100011] rn[19..16]
+ rd[15-12] [10110040] rm[3..0] */
+ kThumb2Vadds, /* vadd vd, vn, vm [111011100011] rn[19..16]
+ rd[15-12] [10100000] rm[3..0] */
+ kThumb2Vaddd, /* vadd vd, vn, vm [111011100011] rn[19..16]
+ rd[15-12] [10110000] rm[3..0] */
+ kThumb2Vdivs, /* vdiv vd, vn, vm [111011101000] rn[19..16]
+ rd[15-12] [10100000] rm[3..0] */
+ kThumb2Vdivd, /* vdiv vd, vn, vm [111011101000] rn[19..16]
+ rd[15-12] [10110000] rm[3..0] */
+ kThumb2VcvtIF, /* vcvt.F32 vd, vm [1110111010111000] vd[15..12]
+ [10101100] vm[3..0] */
+ kThumb2VcvtID, /* vcvt.F64 vd, vm [1110111010111000] vd[15..12]
+ [10111100] vm[3..0] */
+ kThumb2VcvtFI, /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
+ [10101100] vm[3..0] */
+ kThumb2VcvtDI, /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
+ [10111100] vm[3..0] */
+ kThumb2VcvtFd, /* vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12]
+ [10101100] vm[3..0] */
+ kThumb2VcvtDF, /* vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12]
+ [10111100] vm[3..0] */
+ kThumb2Vsqrts, /* vsqrt.f32 vd, vm [1110111010110001] vd[15..12]
+ [10101100] vm[3..0] */
+ kThumb2Vsqrtd, /* vsqrt.f64 vd, vm [1110111010110001] vd[15..12]
+ [10111100] vm[3..0] */
+ kThumb2MovImmShift,/* mov(T2) rd, #<const> [11110] i [00001001111]
+ imm3 rd[11..8] imm8 */
+ kThumb2MovImm16, /* mov(T3) rd, #<const> [11110] i [0010100] imm4 [0]
+ imm3 rd[11..8] imm8 */
+ kThumb2StrRRI12, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
+ rn[19..16] rt[15..12] imm12[11..0] */
+ kThumb2LdrRRI12, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
+ rn[19..16] rt[15..12] imm12[11..0] */
+ kThumb2StrRRI8Predec, /* str(Imm,T4) rd,[rn,#-imm8] [111110000100]
+ rn[19..16] rt[15..12] [1100] imm[7..0]*/
+ kThumb2LdrRRI8Predec, /* ldr(Imm,T4) rd,[rn,#-imm8] [111110000101]
+ rn[19..16] rt[15..12] [1100] imm[7..0]*/
+ kThumb2Cbnz, /* cbnz rd,<label> [101110] i [1] imm5[7..3]
+ rn[2..0] */
+ kThumb2Cbz, /* cbn rd,<label> [101100] i [1] imm5[7..3]
+ rn[2..0] */
+ kThumb2AddRRI12, /* add rd, rn, #imm12 [11110] i [100000] rn[19..16]
+ [0] imm3[14..12] rd[11..8] imm8[7..0] */
+ kThumb2MovRR, /* mov rd, rm [11101010010011110000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2Vmovs, /* vmov.f32 vd, vm [111011101] D [110000]
+ vd[15..12] 101001] M [0] vm[3..0] */
+ kThumb2Vmovd, /* vmov.f64 vd, vm [111011101] D [110000]
+ vd[15..12] 101101] M [0] vm[3..0] */
+ kThumb2Ldmia, /* ldmia [111010001001[ rn[19..16] mask[15..0] */
+ kThumb2Stmia, /* stmia [111010001000[ rn[19..16] mask[15..0] */
+ kThumb2AddRRR, /* add [111010110000] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2SubRRR, /* sub [111010111010] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2SbcRRR, /* sbc [111010110110] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2CmpRR, /* cmp [111010111011] rn[19..16] [0000] [1111]
+ [0000] rm[3..0] */
+ kThumb2SubRRI12, /* sub rd, rn, #imm12 [11110] i [01010] rn[19..16]
+ [0] imm3[14..12] rd[11..8] imm8[7..0] */
+ kThumb2MvnImm12, /* mov(T2) rd, #<const> [11110] i [00011011110]
+ imm3 rd[11..8] imm8 */
+ kThumb2Sel, /* sel rd, rn, rm [111110101010] rn[19-16] rd[11-8]
+ rm[3-0] */
+ kThumb2Ubfx, /* ubfx rd,rn,#lsb,#width [111100111100] rn[19..16]
+ [0] imm3[14-12] rd[11-8] w[4-0] */
+ kThumb2Sbfx, /* ubfx rd,rn,#lsb,#width [111100110100] rn[19..16]
+ [0] imm3[14-12] rd[11-8] w[4-0] */
+ kThumb2LdrRRR, /* ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2LdrhRRR, /* ldrh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2LdrshRRR, /* ldrsh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2LdrbRRR, /* ldrb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2LdrsbRRR, /* ldrsb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2StrRRR, /* str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2StrhRRR, /* str rt,[rn,rm,LSL #imm] [111110000010] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2StrbRRR, /* str rt,[rn,rm,LSL #imm] [111110000000] rn[19-16]
+ rt[15-12] [000000] imm[5-4] rm[3-0] */
+ kThumb2LdrhRRI12, /* ldrh rt,[rn,#imm12] [111110001011]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2LdrshRRI12, /* ldrsh rt,[rn,#imm12] [111110011011]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2LdrbRRI12, /* ldrb rt,[rn,#imm12] [111110001001]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2LdrsbRRI12, /* ldrsb rt,[rn,#imm12] [111110011001]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2StrhRRI12, /* strh rt,[rn,#imm12] [111110001010]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2StrbRRI12, /* strb rt,[rn,#imm12] [111110001000]
+ rt[15..12] rn[19..16] imm12[11..0] */
+ kThumb2Pop, /* pop [1110100010111101] list[15-0]*/
+ kThumb2Push, /* push [1110100100101101] list[15-0]*/
+ kThumb2CmpRI8, /* cmp rn, #<const> [11110] i [011011] rn[19-16] [0]
+ imm3 [1111] imm8[7..0] */
+ kThumb2AdcRRR, /* adc [111010110101] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2AndRRR, /* and [111010100000] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2BicRRR, /* bic [111010100010] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2CmnRR, /* cmn [111010110001] rn[19..16] [0000] [1111]
+ [0000] rm[3..0] */
+ kThumb2EorRRR, /* eor [111010101000] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2MulRRR, /* mul [111110110000] rn[19..16] [1111] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2MnvRR, /* mvn [11101010011011110] rd[11-8] [0000]
+ rm[3..0] */
+ kThumb2RsubRRI8, /* rsub [111100011100] rn[19..16] [0000] rd[11..8]
+ imm8[7..0] */
+ kThumb2NegRR, /* actually rsub rd, rn, #0 */
+ kThumb2OrrRRR, /* orr [111010100100] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2TstRR, /* tst [111010100001] rn[19..16] [0000] [1111]
+ [0000] rm[3..0] */
+ kThumb2LslRRR, /* lsl [111110100000] rn[19..16] [1111] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2LsrRRR, /* lsr [111110100010] rn[19..16] [1111] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2AsrRRR, /* asr [111110100100] rn[19..16] [1111] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2RorRRR, /* ror [111110100110] rn[19..16] [1111] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2LslRRI5, /* lsl [11101010010011110] imm[14.12] rd[11..8]
+ [00] rm[3..0] */
+ kThumb2LsrRRI5, /* lsr [11101010010011110] imm[14.12] rd[11..8]
+ [01] rm[3..0] */
+ kThumb2AsrRRI5, /* asr [11101010010011110] imm[14.12] rd[11..8]
+ [10] rm[3..0] */
+ kThumb2RorRRI5, /* ror [11101010010011110] imm[14.12] rd[11..8]
+ [11] rm[3..0] */
+ kThumb2BicRRI8, /* bic [111100000010] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2AndRRI8, /* bic [111100000000] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2OrrRRI8, /* orr [111100000100] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2EorRRI8, /* eor [111100001000] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2AddRRI8, /* add [111100001000] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2AdcRRI8, /* adc [111100010101] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2SubRRI8, /* sub [111100011011] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2SbcRRI8, /* sbc [111100010111] rn[19..16] [0] imm3
+ rd[11..8] imm8 */
+ kThumb2It, /* it [10111111] firstcond[7-4] mask[3-0] */
+ kThumb2Fmstat, /* fmstat [11101110111100011111101000010000] */
+ kThumb2Vcmpd, /* vcmp [111011101] D [11011] rd[15-12] [1011]
+ E [1] M [0] rm[3-0] */
+ kThumb2Vcmps, /* vcmp [111011101] D [11010] rd[15-12] [1011]
+ E [1] M [0] rm[3-0] */
+ kThumb2LdrPcRel12, /* ldr rd,[pc,#imm12] [1111100011011111] rt[15-12]
+ imm12[11-0] */
+ kThumb2BCond, /* b<c> [1110] S cond[25-22] imm6[21-16] [10]
+ J1 [0] J2 imm11[10..0] */
+ kThumb2Vmovd_RR, /* vmov [111011101] D [110000] vd[15-12 [101101]
+ M [0] vm[3-0] */
+ kThumb2Vmovs_RR, /* vmov [111011101] D [110000] vd[15-12 [101001]
+ M [0] vm[3-0] */
+ kThumb2Fmrs, /* vmov [111011100000] vn[19-16] rt[15-12] [1010]
+ N [0010000] */
+ kThumb2Fmsr, /* vmov [111011100001] vn[19-16] rt[15-12] [1010]
+ N [0010000] */
+ kThumb2Fmrrd, /* vmov [111011000100] rt2[19-16] rt[15-12]
+ [101100] M [1] vm[3-0] */
+ kThumb2Fmdrr, /* vmov [111011000101] rt2[19-16] rt[15-12]
+ [101100] M [1] vm[3-0] */
+ kThumb2Vabsd, /* vabs.f64 [111011101] D [110000] rd[15-12]
+ [1011110] M [0] vm[3-0] */
+ kThumb2Vabss, /* vabs.f32 [111011101] D [110000] rd[15-12]
+ [1010110] M [0] vm[3-0] */
+ kThumb2Vnegd, /* vneg.f64 [111011101] D [110000] rd[15-12]
+ [1011110] M [0] vm[3-0] */
+ kThumb2Vnegs, /* vneg.f32 [111011101] D [110000] rd[15-12]
+ [1010110] M [0] vm[3-0] */
+ kThumb2Vmovs_IMM8, /* vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12]
+ [10100000] imm4l[3-0] */
+ kThumb2Vmovd_IMM8, /* vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12]
+ [10110000] imm4l[3-0] */
+ kThumb2Mla, /* mla [111110110000] rn[19-16] ra[15-12] rd[7-4]
+ [0000] rm[3-0] */
+ kThumb2Umull, /* umull [111110111010] rn[19-16], rdlo[15-12]
+ rdhi[11-8] [0000] rm[3-0] */
+ kThumb2Ldrex, /* ldrex [111010000101] rn[19-16] rt[11-8] [1111]
+ imm8[7-0] */
+ kThumb2Strex, /* strex [111010000100] rn[19-16] rt[11-8] rd[11-8]
+ imm8[7-0] */
+ kThumb2Clrex, /* clrex [111100111011111110000111100101111] */
+ kThumb2Bfi, /* bfi [111100110110] rn[19-16] [0] imm3[14-12]
+ rd[11-8] imm2[7-6] [0] msb[4-0] */
+ kThumb2Bfc, /* bfc [11110011011011110] [0] imm3[14-12]
+ rd[11-8] imm2[7-6] [0] msb[4-0] */
+ kThumb2Dmb, /* dmb [1111001110111111100011110101] option[3-0] */
+ kThumb2LdrPcReln12,/* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
+ imm12[11-0] */
+ kThumb2Stm, /* stm <list> [111010010000] rn[19-16] 000 rl[12-0] */
+ kThumbUndefined, /* undefined [11011110xxxxxxxx] */
+ kThumb2VPopCS, /* vpop <list of callee save fp singles (s16+) */
+ kThumb2VPushCS, /* vpush <list callee save fp singles (s16+) */
+ kThumb2Vldms, /* vldms rd, <list> */
+ kThumb2Vstms, /* vstms rd, <list> */
+ kThumb2BUncond, /* b <label> */
+ kThumb2MovImm16H, /* similar to kThumb2MovImm16, but target high hw */
+ kThumb2AddPCR, /* Thumb2 2-operand add with hard-coded PC target */
+ kThumb2Adr, /* Special purpose encoding of ADR for switch tables */
+ kThumb2MovImm16LST,/* Special purpose version for switch table use */
+ kThumb2MovImm16HST,/* Special purpose version for switch table use */
+ kThumb2LdmiaWB, /* ldmia [111010011001[ rn[19..16] mask[15..0] */
+ kThumb2SubsRRI12, /* setflags encoding */
+ kThumb2OrrRRRs, /* orrx [111010100101] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2Push1, /* t3 encoding of push */
+ kThumb2Pop1, /* t3 encoding of pop */
+ kThumb2RsubRRR, /* rsb [111010111101] rn[19..16] [0000] rd[11..8]
+ [0000] rm[3..0] */
+ kThumb2Smull, /* smull [111110111000] rn[19-16], rdlo[15-12]
+ rdhi[11-8] [0000] rm[3-0] */
+ kArmLast,
};
/* DMB option encodings */
enum ArmOpDmbOptions {
- kSY = 0xf,
- kST = 0xe,
- kISH = 0xb,
- kISHST = 0xa,
- kNSH = 0x7,
- kNSHST = 0x6
+ kSY = 0xf,
+ kST = 0xe,
+ kISH = 0xb,
+ kISHST = 0xa,
+ kNSH = 0x7,
+ kNSHST = 0x6
};
/* Bit flags describing the behavior of each native opcode */
enum ArmOpFeatureFlags {
- kIsBranch = 0,
- kRegDef0,
- kRegDef1,
- kRegDefSP,
- kRegDefLR,
- kRegDefList0,
- kRegDefList1,
- kRegDefFPCSList0,
- kRegDefFPCSList2,
- kRegDefList2,
- kRegUse0,
- kRegUse1,
- kRegUse2,
- kRegUse3,
- kRegUseSP,
- kRegUsePC,
- kRegUseList0,
- kRegUseList1,
- kRegUseFPCSList0,
- kRegUseFPCSList2,
- kNoOperand,
- kIsUnaryOp,
- kIsBinaryOp,
- kIsTertiaryOp,
- kIsQuadOp,
- kIsIT,
- kSetsCCodes,
- kUsesCCodes,
- kMemLoad,
- kMemStore,
- kPCRelFixup,
+ kIsBranch = 0,
+ kRegDef0,
+ kRegDef1,
+ kRegDefSP,
+ kRegDefLR,
+ kRegDefList0,
+ kRegDefList1,
+ kRegDefFPCSList0,
+ kRegDefFPCSList2,
+ kRegDefList2,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUseSP,
+ kRegUsePC,
+ kRegUseList0,
+ kRegUseList1,
+ kRegUseFPCSList0,
+ kRegUseFPCSList2,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsIT,
+ kSetsCCodes,
+ kUsesCCodes,
+ kMemLoad,
+ kMemStore,
+ kPCRelFixup,
};
#define IS_LOAD (1 << kMemLoad)
@@ -713,41 +713,41 @@
/* Instruction assembly fieldLoc kind */
enum ArmEncodingKind {
- kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtModImm, /* Shifted 8-bit immed using [26,14..12,7..0] */
- kFmtImm16, /* Zero-extended immed using [26,19..16,14..12,7..0] */
- kFmtImm6, /* Encoded branch target using [9,7..3]0 */
- kFmtImm12, /* Zero-extended immediate using [26,14..12,7..0] */
- kFmtShift, /* Shift descriptor, [14..12,7..4] */
- kFmtLsb, /* least significant bit using [14..12][7..6] */
- kFmtBWidth, /* bit-field width, encoded as width-1 */
- kFmtShift5, /* Shift count, [14..12,7..6] */
- kFmtBrOffset, /* Signed extended [26,11,13,21-16,10-0]:0 */
- kFmtFPImm, /* Encoded floating point immediate */
- kFmtOff24, /* 24-bit Thumb2 unconditional branch encoding */
+ kFmtUnused,
+ kFmtBitBlt, /* Bit string using end/start */
+ kFmtDfp, /* Double FP reg */
+ kFmtSfp, /* Single FP reg */
+ kFmtModImm, /* Shifted 8-bit immed using [26,14..12,7..0] */
+ kFmtImm16, /* Zero-extended immed using [26,19..16,14..12,7..0] */
+ kFmtImm6, /* Encoded branch target using [9,7..3]0 */
+ kFmtImm12, /* Zero-extended immediate using [26,14..12,7..0] */
+ kFmtShift, /* Shift descriptor, [14..12,7..4] */
+ kFmtLsb, /* least significant bit using [14..12][7..6] */
+ kFmtBWidth, /* bit-field width, encoded as width-1 */
+ kFmtShift5, /* Shift count, [14..12,7..6] */
+ kFmtBrOffset, /* Signed extended [26,11,13,21-16,10-0]:0 */
+ kFmtFPImm, /* Encoded floating point immediate */
+ kFmtOff24, /* 24-bit Thumb2 unconditional branch encoding */
};
/* Struct used to define the snippet positions for each Thumb opcode */
struct ArmEncodingMap {
- u4 skeleton;
- struct {
- ArmEncodingKind kind;
- int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
- int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
- } fieldLoc[4];
- ArmOpcode opcode;
- int flags;
- const char* name;
- const char* fmt;
- int size; /* Size in bytes */
+ u4 skeleton;
+ struct {
+ ArmEncodingKind kind;
+ int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
+ int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
+ } fieldLoc[4];
+ ArmOpcode opcode;
+ int flags;
+ const char* name;
+ const char* fmt;
+ int size; /* Size in bytes */
};
/* Keys for target-specific scheduling and other optimization hints */
enum ArmTargetOptHints {
- kMaxHoistDistance,
+ kMaxHoistDistance,
};
extern const ArmEncodingMap EncodingMap[kArmLast];
diff --git a/src/compiler/codegen/arm/ArmRallocUtil.cc b/src/compiler/codegen/arm/ArmRallocUtil.cc
index 89bff5e..bf7c1c7 100644
--- a/src/compiler/codegen/arm/ArmRallocUtil.cc
+++ b/src/compiler/codegen/arm/ArmRallocUtil.cc
@@ -35,8 +35,8 @@
void oatAdjustSpillMask(CompilationUnit* cUnit)
{
- cUnit->coreSpillMask |= (1 << rLR);
- cUnit->numCoreSpills++;
+ cUnit->coreSpillMask |= (1 << rLR);
+ cUnit->numCoreSpills++;
}
/*
@@ -47,143 +47,140 @@
*/
void oatMarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
{
- DCHECK_GE(reg, FP_REG_MASK + FP_CALLEE_SAVE_BASE);
- reg = (reg & FP_REG_MASK) - FP_CALLEE_SAVE_BASE;
- // Ensure fpVmapTable is large enough
- int tableSize = cUnit->fpVmapTable.size();
- for (int i = tableSize; i < (reg + 1); i++) {
- cUnit->fpVmapTable.push_back(INVALID_VREG);
- }
- // Add the current mapping
- cUnit->fpVmapTable[reg] = vReg;
- // Size of fpVmapTable is high-water mark, use to set mask
- cUnit->numFPSpills = cUnit->fpVmapTable.size();
- cUnit->fpSpillMask = ((1 << cUnit->numFPSpills) - 1) << FP_CALLEE_SAVE_BASE;
+ DCHECK_GE(reg, FP_REG_MASK + FP_CALLEE_SAVE_BASE);
+ reg = (reg & FP_REG_MASK) - FP_CALLEE_SAVE_BASE;
+ // Ensure fpVmapTable is large enough
+ int tableSize = cUnit->fpVmapTable.size();
+ for (int i = tableSize; i < (reg + 1); i++) {
+ cUnit->fpVmapTable.push_back(INVALID_VREG);
+ }
+ // Add the current mapping
+ cUnit->fpVmapTable[reg] = vReg;
+ // Size of fpVmapTable is high-water mark, use to set mask
+ cUnit->numFPSpills = cUnit->fpVmapTable.size();
+ cUnit->fpSpillMask = ((1 << cUnit->numFPSpills) - 1) << FP_CALLEE_SAVE_BASE;
}
void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
{
- RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
- DCHECK(info1 && info2 && info1->pair && info2->pair &&
- (info1->partner == info2->reg) &&
- (info2->partner == info1->reg));
- if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
- LOG(FATAL) << "Long half-temp, half-promoted";
- }
-
- info1->dirty = false;
- info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
- info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- oatFlushRegWideImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- info1->reg, info1->partner);
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
}
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cUnit, info2->sReg) <
+ SRegToVReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = SRegToVReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg),
+ info1->reg, info1->partner);
+ }
}
void oatFlushReg(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if (info->live && info->dirty) {
- info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- oatFlushRegImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- reg, kWord);
- }
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = SRegToVReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg), reg, kWord);
+ }
}
/* Give access to the target-dependent FP register encoding to common code */
bool oatIsFpReg(int reg) {
- return FPREG(reg);
+ return FPREG(reg);
}
uint32_t oatFpRegMask() {
- return FP_REG_MASK;
+ return FP_REG_MASK;
}
/* Clobber all regs that might be used by an external C call */
void oatClobberCalleeSave(CompilationUnit *cUnit)
{
- oatClobber(cUnit, r0);
- oatClobber(cUnit, r1);
- oatClobber(cUnit, r2);
- oatClobber(cUnit, r3);
- oatClobber(cUnit, r12);
- oatClobber(cUnit, r14lr);
- oatClobber(cUnit, fr0);
- oatClobber(cUnit, fr1);
- oatClobber(cUnit, fr2);
- oatClobber(cUnit, fr3);
- oatClobber(cUnit, fr4);
- oatClobber(cUnit, fr5);
- oatClobber(cUnit, fr6);
- oatClobber(cUnit, fr7);
- oatClobber(cUnit, fr8);
- oatClobber(cUnit, fr9);
- oatClobber(cUnit, fr10);
- oatClobber(cUnit, fr11);
- oatClobber(cUnit, fr12);
- oatClobber(cUnit, fr13);
- oatClobber(cUnit, fr14);
- oatClobber(cUnit, fr15);
+ oatClobber(cUnit, r0);
+ oatClobber(cUnit, r1);
+ oatClobber(cUnit, r2);
+ oatClobber(cUnit, r3);
+ oatClobber(cUnit, r12);
+ oatClobber(cUnit, r14lr);
+ oatClobber(cUnit, fr0);
+ oatClobber(cUnit, fr1);
+ oatClobber(cUnit, fr2);
+ oatClobber(cUnit, fr3);
+ oatClobber(cUnit, fr4);
+ oatClobber(cUnit, fr5);
+ oatClobber(cUnit, fr6);
+ oatClobber(cUnit, fr7);
+ oatClobber(cUnit, fr8);
+ oatClobber(cUnit, fr9);
+ oatClobber(cUnit, fr10);
+ oatClobber(cUnit, fr11);
+ oatClobber(cUnit, fr12);
+ oatClobber(cUnit, fr13);
+ oatClobber(cUnit, fr14);
+ oatClobber(cUnit, fr15);
}
extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit)
{
- RegLocation res = LOC_C_RETURN_WIDE;
- res.lowReg = r2;
- res.highReg = r3;
- oatClobber(cUnit, r2);
- oatClobber(cUnit, r3);
- oatMarkInUse(cUnit, r2);
- oatMarkInUse(cUnit, r3);
- oatMarkPair(cUnit, res.lowReg, res.highReg);
- return res;
+ RegLocation res = LOC_C_RETURN_WIDE;
+ res.lowReg = r2;
+ res.highReg = r3;
+ oatClobber(cUnit, r2);
+ oatClobber(cUnit, r3);
+ oatMarkInUse(cUnit, r2);
+ oatMarkInUse(cUnit, r3);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
}
extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
{
- RegLocation res = LOC_C_RETURN;
- res.lowReg = r1;
- oatClobber(cUnit, r1);
- oatMarkInUse(cUnit, r1);
- return res;
+ RegLocation res = LOC_C_RETURN;
+ res.lowReg = r1;
+ oatClobber(cUnit, r1);
+ oatMarkInUse(cUnit, r1);
+ return res;
}
extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
{
- return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
}
/* To be used when explicitly managing register use */
extern void oatLockCallTemps(CompilationUnit* cUnit)
{
- oatLockTemp(cUnit, r0);
- oatLockTemp(cUnit, r1);
- oatLockTemp(cUnit, r2);
- oatLockTemp(cUnit, r3);
+ oatLockTemp(cUnit, r0);
+ oatLockTemp(cUnit, r1);
+ oatLockTemp(cUnit, r2);
+ oatLockTemp(cUnit, r3);
}
/* To be used when explicitly managing register use */
extern void oatFreeCallTemps(CompilationUnit* cUnit)
{
- oatFreeTemp(cUnit, r0);
- oatFreeTemp(cUnit, r1);
- oatFreeTemp(cUnit, r2);
- oatFreeTemp(cUnit, r3);
+ oatFreeTemp(cUnit, r0);
+ oatFreeTemp(cUnit, r1);
+ oatFreeTemp(cUnit, r2);
+ oatFreeTemp(cUnit, r3);
}
/* Convert an instruction to a NOP */
void oatNopLIR( LIR* lir)
{
- ((LIR*)lir)->flags.isNop = true;
+ ((LIR*)lir)->flags.isNop = true;
}
} // namespace art
diff --git a/src/compiler/codegen/arm/Assemble.cc b/src/compiler/codegen/arm/Assemble.cc
index 1d1442a..aca146e 100644
--- a/src/compiler/codegen/arm/Assemble.cc
+++ b/src/compiler/codegen/arm/Assemble.cc
@@ -991,393 +991,382 @@
* instruction.
*/
AssemblerStatus oatAssembleInstructions(CompilationUnit* cUnit,
- intptr_t startAddr)
+ intptr_t startAddr)
{
- LIR* lir;
- AssemblerStatus res = kSuccess; // Assume success
+ LIR* lir;
+ AssemblerStatus res = kSuccess; // Assume success
- for (lir = (LIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ for (lir = (LIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
- if (lir->opcode < 0) {
- if ((lir->opcode == kPseudoPseudoAlign4) &&
- /* 1 means padding is needed */
- (lir->operands[0] == 1)) {
- cUnit->codeBuffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
- cUnit->codeBuffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
- }
- continue;
- }
-
- if (lir->flags.isNop) {
- continue;
- }
-
- /*
- * For PC-relative displacements we won't know if the
- * selected instruction will work until late (i.e. - now).
- * If something doesn't fit, we must replace the short-form
- * operation with a longer-form one. Note, though, that this
- * can change code we've already processed, so we'll need to
- * re-calculate offsets and restart. To limit the number of
- * restarts, the entire list will be scanned and patched.
- * Of course, the patching itself may cause new overflows so this
- * is an iterative process.
- */
- if (lir->flags.pcRelFixup) {
- if (lir->opcode == kThumbLdrPcRel ||
- lir->opcode == kThumb2LdrPcRel12 ||
- lir->opcode == kThumbAddPcRel ||
- ((lir->opcode == kThumb2Vldrd) && (lir->operands[1] == r15pc)) ||
- ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
- /*
- * PC-relative loads are mostly used to load immediates
- * that are too large to materialize directly in one shot.
- * However, if the load displacement exceeds the limit,
- * we revert to a 2-instruction materialization sequence.
- */
- LIR *lirTarget = (LIR *) lir->target;
- intptr_t pc = (lir->offset + 4) & ~3;
- intptr_t target = lirTarget->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- // First, a sanity check for cases we shouldn't see now
- if (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
- ((lir->opcode == kThumbLdrPcRel) && (delta > 1020))) {
- // Shouldn't happen in current codegen.
- LOG(FATAL) << "Unexpected pc-rel offset " << delta;
- }
- // Now, check for the two difficult cases
- if (((lir->opcode == kThumb2LdrPcRel12) && (delta > 4091)) ||
- ((lir->opcode == kThumb2Vldrs) && (delta > 1020)) ||
- ((lir->opcode == kThumb2Vldrd) && (delta > 1020))) {
- /*
- * Note: because rLR may be used to fix up out-of-range
- * vldrs/vldrd we include REG_DEF_LR in the resource
- * masks for these instructions.
- */
- int baseReg = (lir->opcode == kThumb2LdrPcRel12) ?
- lir->operands[0] : rLR;
-
- // Add new Adr to generate the address
- LIR* newAdr = rawLIR(cUnit, lir->dalvikOffset, kThumb2Adr,
- baseReg, 0, 0, 0, 0, lir->target);
- oatInsertLIRBefore((LIR*)lir, (LIR*)newAdr);
-
- // Convert to normal load
- if (lir->opcode == kThumb2LdrPcRel12) {
- lir->opcode = kThumb2LdrRRI12;
- }
- // Change the load to be relative to the new Adr base
- lir->operands[1] = baseReg;
- lir->operands[2] = 0;
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- } else {
- if ((lir->opcode == kThumb2Vldrs) ||
- (lir->opcode == kThumb2Vldrd)) {
- lir->operands[2] = delta >> 2;
- } else {
- lir->operands[1] = (lir->opcode == kThumb2LdrPcRel12) ?
- delta : delta >> 2;
- }
- }
- } else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- if (delta > 126 || delta < 0) {
- /*
- * Convert to cmp rx,#0 / b[eq/ne] tgt pair
- * Make new branch instruction and insert after
- */
- LIR* newInst =
- rawLIR(cUnit, lir->dalvikOffset, kThumbBCond, 0,
- (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
- 0, 0, 0, lir->target);
- oatInsertLIRAfter((LIR *)lir, (LIR *)newInst);
- /* Convert the cb[n]z to a cmp rx, #0 ] */
- lir->opcode = kThumbCmpRI8;
- /* operand[0] is src1 in both cb[n]z & CmpRI8 */
- lir->operands[1] = 0;
- lir->target = 0;
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- } else {
- lir->operands[1] = delta >> 1;
- }
- } else if (lir->opcode == kThumb2Push ||
- lir->opcode == kThumb2Pop) {
- if (__builtin_popcount(lir->operands[0]) == 1) {
- /*
- * The standard push/pop multiple instruction
- * requires at least two registers in the list.
- * If we've got just one, switch to the single-reg
- * encoding.
- */
- lir->opcode = (lir->opcode == kThumb2Push)
- ? kThumb2Push1 : kThumb2Pop1;
- int reg = 0;
- while (lir->operands[0]) {
- if (lir->operands[0] & 0x1) {
- break;
- } else {
- reg++;
- lir->operands[0] >>= 1;
- }
- }
- lir->operands[0] = reg;
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- }
- } else if (lir->opcode == kThumbBCond ||
- lir->opcode == kThumb2BCond) {
- LIR *targetLIR = (LIR *) lir->target;
- int delta = 0;
- DCHECK(targetLIR);
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- delta = target - pc;
- if ((lir->opcode == kThumbBCond) &&
- (delta > 254 || delta < -256)) {
- lir->opcode = kThumb2BCond;
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- }
- lir->operands[0] = delta >> 1;
- } else if (lir->opcode == kThumb2BUncond) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
- lir->operands[0] == 0) { // Useless branch
- lir->flags.isNop = true;
- res = kRetryAll;
- }
- } else if (lir->opcode == kThumbBUncond) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- if (delta > 2046 || delta < -2048) {
- // Convert to Thumb2BCond w/ kArmCondAl
- lir->opcode = kThumb2BUncond;
- lir->operands[0] = 0;
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- } else {
- lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
- lir->operands[0] == -1) { // Useless branch
- lir->flags.isNop = true;
- res = kRetryAll;
- }
- }
- } else if (lir->opcode == kThumbBlx1) {
- DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
- /* curPC is Thumb */
- intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
- intptr_t target = lir->operands[1];
-
- /* Match bit[1] in target with base */
- if (curPC & 0x2) {
- target |= 0x2;
- }
- int delta = target - curPC;
- DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
-
- lir->operands[0] = (delta >> 12) & 0x7ff;
- NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
- } else if (lir->opcode == kThumbBl1) {
- DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
- /* Both curPC and target are Thumb */
- intptr_t curPC = startAddr + lir->offset + 4;
- intptr_t target = lir->operands[1];
-
- int delta = target - curPC;
- DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
-
- lir->operands[0] = (delta >> 12) & 0x7ff;
- NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
- } else if (lir->opcode == kThumb2Adr) {
- SwitchTable *tabRec = (SwitchTable*)lir->operands[2];
- LIR* target = (LIR*)lir->target;
- int targetDisp = tabRec ? tabRec->offset
- : target->offset;
- int disp = targetDisp - ((lir->offset + 4) & ~3);
- if (disp < 4096) {
- lir->operands[1] = disp;
- } else {
- // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
- LIR *newMov16L =
- rawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16LST,
- lir->operands[0], 0, (intptr_t)lir, (intptr_t)tabRec,
- 0, lir->target);
- oatInsertLIRBefore((LIR*)lir, (LIR*)newMov16L);
- LIR *newMov16H =
- rawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16HST,
- lir->operands[0], 0, (intptr_t)lir, (intptr_t)tabRec,
- 0, lir->target);
- oatInsertLIRBefore((LIR*)lir, (LIR*)newMov16H);
- lir->opcode = kThumb2AddRRR;
- lir->operands[1] = rPC;
- lir->operands[2] = lir->operands[0];
- oatSetupResourceMasks(lir);
- res = kRetryAll;
- }
- } else if (lir->opcode == kThumb2MovImm16LST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
- LIR *addPCInst = (LIR*)lir->operands[2];
- SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
- // If tabRec is null, this is a literal load. Use target
- LIR* target = (LIR*)lir->target;
- int targetDisp = tabRec ? tabRec->offset
- : target->offset;
- lir->operands[1] = (targetDisp -
- (addPCInst->offset + 4)) & 0xffff;
- } else if (lir->opcode == kThumb2MovImm16HST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
- LIR *addPCInst = (LIR*)lir->operands[2];
- SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
- // If tabRec is null, this is a literal load. Use target
- LIR* target = (LIR*)lir->target;
- int targetDisp = tabRec ? tabRec->offset
- : target->offset;
- lir->operands[1] = ((targetDisp -
- (addPCInst->offset + 4)) >> 16) & 0xffff;
- }
- }
- /*
- * If one of the pc-relative instructions expanded we'll have
- * to make another pass. Don't bother to fully assemble the
- * instruction.
- */
- if (res != kSuccess) {
- continue;
- }
- const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
- u4 bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
- u4 operand;
- u4 value;
- operand = lir->operands[i];
- switch (encoder->fieldLoc[i].kind) {
- case kFmtUnused:
- break;
- case kFmtFPImm:
- value = ((operand & 0xF0) >> 4) << encoder->fieldLoc[i].end;
- value |= (operand & 0x0F) << encoder->fieldLoc[i].start;
- bits |= value;
- break;
- case kFmtBrOffset:
- value = ((operand & 0x80000) >> 19) << 26;
- value |= ((operand & 0x40000) >> 18) << 11;
- value |= ((operand & 0x20000) >> 17) << 13;
- value |= ((operand & 0x1f800) >> 11) << 16;
- value |= (operand & 0x007ff);
- bits |= value;
- break;
- case kFmtShift5:
- value = ((operand & 0x1c) >> 2) << 12;
- value |= (operand & 0x03) << 6;
- bits |= value;
- break;
- case kFmtShift:
- value = ((operand & 0x70) >> 4) << 12;
- value |= (operand & 0x0f) << 4;
- bits |= value;
- break;
- case kFmtBWidth:
- value = operand - 1;
- bits |= value;
- break;
- case kFmtLsb:
- value = ((operand & 0x1c) >> 2) << 12;
- value |= (operand & 0x03) << 6;
- bits |= value;
- break;
- case kFmtImm6:
- value = ((operand & 0x20) >> 5) << 9;
- value |= (operand & 0x1f) << 3;
- bits |= value;
- break;
- case kFmtBitBlt:
- value = (operand << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
- bits |= value;
- break;
- case kFmtDfp: {
- DCHECK(DOUBLEREG(operand));
- DCHECK((operand & 0x1) == 0);
- int regName = (operand & FP_REG_MASK) >> 1;
- /* Snag the 1-bit slice and position it */
- value = ((regName & 0x10) >> 4) <<
- encoder->fieldLoc[i].end;
- /* Extract and position the 4-bit slice */
- value |= (regName & 0x0f) <<
- encoder->fieldLoc[i].start;
- bits |= value;
- break;
- }
- case kFmtSfp:
- DCHECK(SINGLEREG(operand));
- /* Snag the 1-bit slice and position it */
- value = (operand & 0x1) <<
- encoder->fieldLoc[i].end;
- /* Extract and position the 4-bit slice */
- value |= ((operand & 0x1e) >> 1) <<
- encoder->fieldLoc[i].start;
- bits |= value;
- break;
- case kFmtImm12:
- case kFmtModImm:
- value = ((operand & 0x800) >> 11) << 26;
- value |= ((operand & 0x700) >> 8) << 12;
- value |= operand & 0x0ff;
- bits |= value;
- break;
- case kFmtImm16:
- value = ((operand & 0x0800) >> 11) << 26;
- value |= ((operand & 0xf000) >> 12) << 16;
- value |= ((operand & 0x0700) >> 8) << 12;
- value |= operand & 0x0ff;
- bits |= value;
- break;
- case kFmtOff24: {
- u4 signbit = (operand >> 31) & 0x1;
- u4 i1 = (operand >> 22) & 0x1;
- u4 i2 = (operand >> 21) & 0x1;
- u4 imm10 = (operand >> 11) & 0x03ff;
- u4 imm11 = operand & 0x07ff;
- u4 j1 = (i1 ^ signbit) ? 0 : 1;
- u4 j2 = (i2 ^ signbit) ? 0 : 1;
- value = (signbit << 26) | (j1 << 13) | (j2 << 11) |
- (imm10 << 16) | imm11;
- bits |= value;
- }
- break;
- default:
- LOG(FATAL) << "Bad fmt:" << (int)encoder->fieldLoc[i].kind;
- }
- }
- if (encoder->size == 4) {
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
- }
- cUnit->codeBuffer.push_back(bits & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ if (lir->opcode < 0) {
+ /* 1 means padding is needed */
+ if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
+ cUnit->codeBuffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
+ cUnit->codeBuffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+ }
+ continue;
}
- return res;
+
+ if (lir->flags.isNop) {
+ continue;
+ }
+
+ /*
+ * For PC-relative displacements we won't know if the
+ * selected instruction will work until late (i.e. - now).
+ * If something doesn't fit, we must replace the short-form
+ * operation with a longer-form one. Note, though, that this
+ * can change code we've already processed, so we'll need to
+ * re-calculate offsets and restart. To limit the number of
+ * restarts, the entire list will be scanned and patched.
+ * Of course, the patching itself may cause new overflows so this
+ * is an iterative process.
+ */
+ if (lir->flags.pcRelFixup) {
+ if (lir->opcode == kThumbLdrPcRel ||
+ lir->opcode == kThumb2LdrPcRel12 ||
+ lir->opcode == kThumbAddPcRel ||
+ ((lir->opcode == kThumb2Vldrd) && (lir->operands[1] == r15pc)) ||
+ ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
+ /*
+ * PC-relative loads are mostly used to load immediates
+ * that are too large to materialize directly in one shot.
+ * However, if the load displacement exceeds the limit,
+ * we revert to a 2-instruction materialization sequence.
+ */
+ LIR *lirTarget = (LIR *) lir->target;
+ intptr_t pc = (lir->offset + 4) & ~3;
+ intptr_t target = lirTarget->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ // First, a sanity check for cases we shouldn't see now
+ if (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
+ ((lir->opcode == kThumbLdrPcRel) && (delta > 1020))) {
+ // Shouldn't happen in current codegen.
+ LOG(FATAL) << "Unexpected pc-rel offset " << delta;
+ }
+ // Now, check for the two difficult cases
+ if (((lir->opcode == kThumb2LdrPcRel12) && (delta > 4091)) ||
+ ((lir->opcode == kThumb2Vldrs) && (delta > 1020)) ||
+ ((lir->opcode == kThumb2Vldrd) && (delta > 1020))) {
+ /*
+ * Note: because rLR may be used to fix up out-of-range
+ * vldrs/vldrd we include REG_DEF_LR in the resource
+ * masks for these instructions.
+ */
+ int baseReg = (lir->opcode == kThumb2LdrPcRel12) ?
+ lir->operands[0] : rLR;
+
+ // Add new Adr to generate the address
+ LIR* newAdr = rawLIR(cUnit, lir->dalvikOffset, kThumb2Adr,
+ baseReg, 0, 0, 0, 0, lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newAdr);
+
+ // Convert to normal load
+ if (lir->opcode == kThumb2LdrPcRel12) {
+ lir->opcode = kThumb2LdrRRI12;
+ }
+ // Change the load to be relative to the new Adr base
+ lir->operands[1] = baseReg;
+ lir->operands[2] = 0;
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ } else {
+ if ((lir->opcode == kThumb2Vldrs) ||
+ (lir->opcode == kThumb2Vldrd)) {
+ lir->operands[2] = delta >> 2;
+ } else {
+ lir->operands[1] = (lir->opcode == kThumb2LdrPcRel12) ? delta :
+ delta >> 2;
+ }
+ }
+ } else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta > 126 || delta < 0) {
+ /*
+ * Convert to cmp rx,#0 / b[eq/ne] tgt pair
+ * Make new branch instruction and insert after
+ */
+ LIR* newInst =
+ rawLIR(cUnit, lir->dalvikOffset, kThumbBCond, 0,
+ (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
+ 0, 0, 0, lir->target);
+ oatInsertLIRAfter((LIR *)lir, (LIR *)newInst);
+          /* Convert the cb[n]z to a cmp rx, #0 */
+ lir->opcode = kThumbCmpRI8;
+ /* operand[0] is src1 in both cb[n]z & CmpRI8 */
+ lir->operands[1] = 0;
+ lir->target = 0;
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ } else {
+ lir->operands[1] = delta >> 1;
+ }
+ } else if (lir->opcode == kThumb2Push || lir->opcode == kThumb2Pop) {
+ if (__builtin_popcount(lir->operands[0]) == 1) {
+ /*
+ * The standard push/pop multiple instruction
+ * requires at least two registers in the list.
+ * If we've got just one, switch to the single-reg
+ * encoding.
+ */
+ lir->opcode = (lir->opcode == kThumb2Push) ? kThumb2Push1 :
+ kThumb2Pop1;
+ int reg = 0;
+ while (lir->operands[0]) {
+ if (lir->operands[0] & 0x1) {
+ break;
+ } else {
+ reg++;
+ lir->operands[0] >>= 1;
+ }
+ }
+ lir->operands[0] = reg;
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
+ LIR *targetLIR = (LIR *) lir->target;
+ int delta = 0;
+ DCHECK(targetLIR);
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ delta = target - pc;
+ if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
+ lir->opcode = kThumb2BCond;
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ }
+ lir->operands[0] = delta >> 1;
+ } else if (lir->opcode == kThumb2BUncond) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ lir->operands[0] = delta >> 1;
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ lir->operands[0] == 0) { // Useless branch
+ lir->flags.isNop = true;
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumbBUncond) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta > 2046 || delta < -2048) {
+ // Convert to Thumb2BCond w/ kArmCondAl
+ lir->opcode = kThumb2BUncond;
+ lir->operands[0] = 0;
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ } else {
+ lir->operands[0] = delta >> 1;
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ lir->operands[0] == -1) { // Useless branch
+ lir->flags.isNop = true;
+ res = kRetryAll;
+ }
+ }
+ } else if (lir->opcode == kThumbBlx1) {
+ DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
+ /* curPC is Thumb */
+ intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ intptr_t target = lir->operands[1];
+
+ /* Match bit[1] in target with base */
+ if (curPC & 0x2) {
+ target |= 0x2;
+ }
+ int delta = target - curPC;
+ DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+ lir->operands[0] = (delta >> 12) & 0x7ff;
+ NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
+ } else if (lir->opcode == kThumbBl1) {
+ DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
+ /* Both curPC and target are Thumb */
+ intptr_t curPC = startAddr + lir->offset + 4;
+ intptr_t target = lir->operands[1];
+
+ int delta = target - curPC;
+ DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+ lir->operands[0] = (delta >> 12) & 0x7ff;
+ NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
+ } else if (lir->opcode == kThumb2Adr) {
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[2];
+ LIR* target = (LIR*)lir->target;
+ int targetDisp = tabRec ? tabRec->offset
+ : target->offset;
+ int disp = targetDisp - ((lir->offset + 4) & ~3);
+ if (disp < 4096) {
+ lir->operands[1] = disp;
+ } else {
+ // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
+ LIR *newMov16L =
+ rawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16LST,
+ lir->operands[0], 0, (intptr_t)lir, (intptr_t)tabRec,
+ 0, lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newMov16L);
+ LIR *newMov16H =
+ rawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16HST,
+ lir->operands[0], 0, (intptr_t)lir, (intptr_t)tabRec,
+ 0, lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newMov16H);
+ lir->opcode = kThumb2AddRRR;
+ lir->operands[1] = rPC;
+ lir->operands[2] = lir->operands[0];
+ oatSetupResourceMasks(lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumb2MovImm16LST) {
+ // operands[1] should hold disp, [2] has add, [3] has tabRec
+ LIR *addPCInst = (LIR*)lir->operands[2];
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ // If tabRec is null, this is a literal load. Use target
+ LIR* target = (LIR*)lir->target;
+ int targetDisp = tabRec ? tabRec->offset : target->offset;
+ lir->operands[1] = (targetDisp - (addPCInst->offset + 4)) & 0xffff;
+ } else if (lir->opcode == kThumb2MovImm16HST) {
+ // operands[1] should hold disp, [2] has add, [3] has tabRec
+ LIR *addPCInst = (LIR*)lir->operands[2];
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ // If tabRec is null, this is a literal load. Use target
+ LIR* target = (LIR*)lir->target;
+ int targetDisp = tabRec ? tabRec->offset : target->offset;
+ lir->operands[1] =
+ ((targetDisp - (addPCInst->offset + 4)) >> 16) & 0xffff;
+ }
+ }
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
+ u4 bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ u4 operand;
+ u4 value;
+ operand = lir->operands[i];
+ switch (encoder->fieldLoc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtFPImm:
+ value = ((operand & 0xF0) >> 4) << encoder->fieldLoc[i].end;
+ value |= (operand & 0x0F) << encoder->fieldLoc[i].start;
+ bits |= value;
+ break;
+ case kFmtBrOffset:
+ value = ((operand & 0x80000) >> 19) << 26;
+ value |= ((operand & 0x40000) >> 18) << 11;
+ value |= ((operand & 0x20000) >> 17) << 13;
+ value |= ((operand & 0x1f800) >> 11) << 16;
+ value |= (operand & 0x007ff);
+ bits |= value;
+ break;
+ case kFmtShift5:
+ value = ((operand & 0x1c) >> 2) << 12;
+ value |= (operand & 0x03) << 6;
+ bits |= value;
+ break;
+ case kFmtShift:
+ value = ((operand & 0x70) >> 4) << 12;
+ value |= (operand & 0x0f) << 4;
+ bits |= value;
+ break;
+ case kFmtBWidth:
+ value = operand - 1;
+ bits |= value;
+ break;
+ case kFmtLsb:
+ value = ((operand & 0x1c) >> 2) << 12;
+ value |= (operand & 0x03) << 6;
+ bits |= value;
+ break;
+ case kFmtImm6:
+ value = ((operand & 0x20) >> 5) << 9;
+ value |= (operand & 0x1f) << 3;
+ bits |= value;
+ break;
+ case kFmtBitBlt:
+ value = (operand << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ case kFmtDfp: {
+ DCHECK(DOUBLEREG(operand));
+ DCHECK((operand & 0x1) == 0);
+ int regName = (operand & FP_REG_MASK) >> 1;
+ /* Snag the 1-bit slice and position it */
+ value = ((regName & 0x10) >> 4) << encoder->fieldLoc[i].end;
+ /* Extract and position the 4-bit slice */
+ value |= (regName & 0x0f) << encoder->fieldLoc[i].start;
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(SINGLEREG(operand));
+ /* Snag the 1-bit slice and position it */
+ value = (operand & 0x1) << encoder->fieldLoc[i].end;
+ /* Extract and position the 4-bit slice */
+ value |= ((operand & 0x1e) >> 1) << encoder->fieldLoc[i].start;
+ bits |= value;
+ break;
+ case kFmtImm12:
+ case kFmtModImm:
+ value = ((operand & 0x800) >> 11) << 26;
+ value |= ((operand & 0x700) >> 8) << 12;
+ value |= operand & 0x0ff;
+ bits |= value;
+ break;
+ case kFmtImm16:
+ value = ((operand & 0x0800) >> 11) << 26;
+ value |= ((operand & 0xf000) >> 12) << 16;
+ value |= ((operand & 0x0700) >> 8) << 12;
+ value |= operand & 0x0ff;
+ bits |= value;
+ break;
+ case kFmtOff24: {
+ u4 signbit = (operand >> 31) & 0x1;
+ u4 i1 = (operand >> 22) & 0x1;
+ u4 i2 = (operand >> 21) & 0x1;
+ u4 imm10 = (operand >> 11) & 0x03ff;
+ u4 imm11 = operand & 0x07ff;
+ u4 j1 = (i1 ^ signbit) ? 0 : 1;
+ u4 j2 = (i2 ^ signbit) ? 0 : 1;
+ value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
+ imm11;
+ bits |= value;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad fmt:" << (int)encoder->fieldLoc[i].kind;
+ }
+ }
+ if (encoder->size == 4) {
+ cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ }
+ cUnit->codeBuffer.push_back(bits & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ }
+ return res;
}
int oatGetInsnSize(LIR* lir)
{
- return EncodingMap[lir->opcode].size;
+ return EncodingMap[lir->opcode].size;
}
/*
@@ -1385,29 +1374,29 @@
*/
int oatAssignInsnOffsets(CompilationUnit* cUnit)
{
- LIR* armLIR;
- int offset = 0;
+ LIR* armLIR;
+ int offset = 0;
- for (armLIR = (LIR *) cUnit->firstLIRInsn;
- armLIR;
- armLIR = NEXT_LIR(armLIR)) {
- armLIR->offset = offset;
- if (armLIR->opcode >= 0) {
- if (!armLIR->flags.isNop) {
- offset += armLIR->flags.size;
- }
- } else if (armLIR->opcode == kPseudoPseudoAlign4) {
- if (offset & 0x2) {
- offset += 2;
- armLIR->operands[0] = 1;
- } else {
- armLIR->operands[0] = 0;
- }
- }
- /* Pseudo opcodes don't consume space */
+ for (armLIR = (LIR *) cUnit->firstLIRInsn;
+ armLIR;
+ armLIR = NEXT_LIR(armLIR)) {
+ armLIR->offset = offset;
+ if (armLIR->opcode >= 0) {
+ if (!armLIR->flags.isNop) {
+ offset += armLIR->flags.size;
+ }
+ } else if (armLIR->opcode == kPseudoPseudoAlign4) {
+ if (offset & 0x2) {
+ offset += 2;
+ armLIR->operands[0] = 1;
+ } else {
+ armLIR->operands[0] = 0;
+ }
}
+ /* Pseudo opcodes don't consume space */
+ }
- return offset;
+ return offset;
}
} // namespace art
diff --git a/src/compiler/codegen/arm/Codegen.h b/src/compiler/codegen/arm/Codegen.h
index f467d4f..10f5e38 100644
--- a/src/compiler/codegen/arm/Codegen.h
+++ b/src/compiler/codegen/arm/Codegen.h
@@ -36,12 +36,12 @@
/* Forward declaraton the portable versions due to circular dependency */
bool genArithOpFloatPortable(CompilationUnit* cUnit, MIR* mir,
- RegLocation rlDest, RegLocation rlSrc1,
- RegLocation rlSrc2);
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2);
bool genArithOpDoublePortable(CompilationUnit* cUnit, MIR* mir,
- RegLocation rlDest, RegLocation rlSrc1,
- RegLocation rlSrc2);
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2);
bool genConversionPortable(CompilationUnit* cUnit, MIR* mir);
@@ -64,10 +64,10 @@
*/
inline RegisterClass oatRegClassBySize(OpSize size)
{
- return (size == kUnsignedHalf ||
- size == kSignedHalf ||
- size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ return (size == kUnsignedHalf ||
+ size == kSignedHalf ||
+ size == kUnsignedByte ||
+ size == kSignedByte ) ? kCoreReg : kAnyReg;
}
/*
@@ -79,12 +79,12 @@
*/
#if __BYTE_ORDER == __LITTLE_ENDIAN
inline s4 s4FromSwitchData(const void* switchData) {
- return *(s4*) switchData;
+ return *(s4*) switchData;
}
#else
inline s4 s4FromSwitchData(const void* switchData) {
- u2* data = switchData;
- return data[0] | (((s4) data[1]) << 16);
+ u2* data = switchData;
+ return data[0] | (((s4) data[1]) << 16);
}
#endif
@@ -92,7 +92,6 @@
extern void oatSetupResourceMasks(LIR* lir);
-extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest,
- int rSrc);
+extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc);
} // namespace art
diff --git a/src/compiler/codegen/arm/FP/Thumb2VFP.cc b/src/compiler/codegen/arm/FP/Thumb2VFP.cc
index 380c014..fbce1f5 100644
--- a/src/compiler/codegen/arm/FP/Thumb2VFP.cc
+++ b/src/compiler/codegen/arm/FP/Thumb2VFP.cc
@@ -19,280 +19,277 @@
bool genArithOpFloat(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- int op = kThumbBkpt;
- RegLocation rlResult;
+ int op = kThumbBkpt;
+ RegLocation rlResult;
- /*
- * Don't attempt to optimize register usage since these opcodes call out to
- * the handlers.
- */
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_FLOAT:
- op = kThumb2Vadds;
- break;
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT:
- op = kThumb2Vsubs;
- break;
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT:
- op = kThumb2Vdivs;
- break;
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT:
- op = kThumb2Vmuls;
- break;
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT:
- case Instruction::NEG_FLOAT: {
- return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1,
- rlSrc2);
- }
- default:
- return true;
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kThumb2Vadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kThumb2Vsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kThumb2Vdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kThumb2Vmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ case Instruction::NEG_FLOAT: {
+ return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
}
- rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR3(cUnit, (ArmOpcode)op, rlResult.lowReg, rlSrc1.lowReg,
- rlSrc2.lowReg);
- storeValue(cUnit, rlDest, rlResult);
- return false;
+ default:
+ return true;
+ }
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR3(cUnit, (ArmOpcode)op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
}
bool genArithOpDouble(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- int op = kThumbBkpt;
- RegLocation rlResult;
+ int op = kThumbBkpt;
+ RegLocation rlResult;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::ADD_DOUBLE:
- op = kThumb2Vaddd;
- break;
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE:
- op = kThumb2Vsubd;
- break;
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE:
- op = kThumb2Vdivd;
- break;
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE:
- op = kThumb2Vmuld;
- break;
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE:
- case Instruction::NEG_DOUBLE: {
- return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1,
- rlSrc2);
- }
- default:
- return true;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kThumb2Vaddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kThumb2Vsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kThumb2Vdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kThumb2Vmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ case Instruction::NEG_DOUBLE: {
+ return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
}
+ default:
+ return true;
+ }
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- newLIR3(cUnit, (ArmOpcode)op, S2D(rlResult.lowReg, rlResult.highReg),
- S2D(rlSrc1.lowReg, rlSrc1.highReg),
- S2D(rlSrc2.lowReg, rlSrc2.highReg));
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ DCHECK(rlSrc1.wide);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+ DCHECK(rlSrc2.wide);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ DCHECK(rlDest.wide);
+ DCHECK(rlResult.wide);
+ newLIR3(cUnit, (ArmOpcode)op, S2D(rlResult.lowReg, rlResult.highReg),
+ S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
bool genConversion(CompilationUnit* cUnit, MIR* mir)
{
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int op = kThumbBkpt;
- bool longSrc = false;
- bool longDest = false;
- int srcReg;
- RegLocation rlSrc;
- RegLocation rlDest;
- RegLocation rlResult;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ int op = kThumbBkpt;
+ bool longSrc = false;
+ bool longDest = false;
+ int srcReg;
+ RegLocation rlSrc;
+ RegLocation rlDest;
+ RegLocation rlResult;
- switch (opcode) {
- case Instruction::INT_TO_FLOAT:
- longSrc = false;
- longDest = false;
- op = kThumb2VcvtIF;
- break;
- case Instruction::FLOAT_TO_INT:
- longSrc = false;
- longDest = false;
- op = kThumb2VcvtFI;
- break;
- case Instruction::DOUBLE_TO_FLOAT:
- longSrc = true;
- longDest = false;
- op = kThumb2VcvtDF;
- break;
- case Instruction::FLOAT_TO_DOUBLE:
- longSrc = false;
- longDest = true;
- op = kThumb2VcvtFd;
- break;
- case Instruction::INT_TO_DOUBLE:
- longSrc = false;
- longDest = true;
- op = kThumb2VcvtID;
- break;
- case Instruction::DOUBLE_TO_INT:
- longSrc = true;
- longDest = false;
- op = kThumb2VcvtDI;
- break;
- case Instruction::LONG_TO_DOUBLE:
- case Instruction::FLOAT_TO_LONG:
- case Instruction::LONG_TO_FLOAT:
- case Instruction::DOUBLE_TO_LONG:
- return genConversionPortable(cUnit, mir);
- default:
- return true;
- }
- if (longSrc) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlSrc = loadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
- }
- if (longDest) {
- rlDest = oatGetDestWide(cUnit, mir, 0, 1);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, (ArmOpcode)op, S2D(rlResult.lowReg, rlResult.highReg),
- srcReg);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- rlDest = oatGetDest(cUnit, mir, 0);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, (ArmOpcode)op, rlResult.lowReg, srcReg);
- storeValue(cUnit, rlDest, rlResult);
- }
- return false;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ longSrc = false;
+ longDest = false;
+ op = kThumb2VcvtIF;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ longSrc = false;
+ longDest = false;
+ op = kThumb2VcvtFI;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ longSrc = true;
+ longDest = false;
+ op = kThumb2VcvtDF;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kThumb2VcvtFd;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kThumb2VcvtID;
+ break;
+ case Instruction::DOUBLE_TO_INT:
+ longSrc = true;
+ longDest = false;
+ op = kThumb2VcvtDI;
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::DOUBLE_TO_LONG:
+ return genConversionPortable(cUnit, mir);
+ default:
+ return true;
+ }
+ if (longSrc) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
+ srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlSrc = loadValue(cUnit, rlSrc, kFPReg);
+ srcReg = rlSrc.lowReg;
+ }
+ if (longDest) {
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (ArmOpcode)op, S2D(rlResult.lowReg, rlResult.highReg),
+ srcReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ rlDest = oatGetDest(cUnit, mir, 0);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (ArmOpcode)op, rlResult.lowReg, srcReg);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ return false;
}
void genFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
bool gtBias, bool isDouble)
{
- LIR* labelList = (LIR*)cUnit->blockLabelList;
- LIR* target = &labelList[bb->taken->id];
- RegLocation rlSrc1;
- RegLocation rlSrc2;
- if (isDouble) {
- rlSrc1 = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc2 = oatGetSrcWide(cUnit, mir, 2, 3);
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
-        newLIR2(cUnit, kThumb2Vcmpd, S2D(rlSrc1.lowReg, rlSrc1.highReg),
- S2D(rlSrc2.lowReg, rlSrc2.highReg));
- } else {
- rlSrc1 = oatGetSrc(cUnit, mir, 0);
- rlSrc2 = oatGetSrc(cUnit, mir, 1);
- rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
- newLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
- }
- newLIR0(cUnit, kThumb2Fmstat);
- ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- switch(ccode) {
- case kCondEq:
- case kCondNe:
- break;
- case kCondLt:
- if (gtBias) {
- ccode = kCondMi;
- }
- break;
- case kCondLe:
- if (gtBias) {
- ccode = kCondLs;
- }
- break;
- case kCondGt:
- if (gtBias) {
- ccode = kCondHi;
- }
- break;
- case kCondGe:
- if (gtBias) {
- ccode = kCondCs;
- }
- break;
- default:
- LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
- }
- opCondBranch(cUnit, ccode, target);
+ LIR* labelList = (LIR*)cUnit->blockLabelList;
+ LIR* target = &labelList[bb->taken->id];
+ RegLocation rlSrc1;
+ RegLocation rlSrc2;
+ if (isDouble) {
+ rlSrc1 = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc2 = oatGetSrcWide(cUnit, mir, 2, 3);
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+    newLIR2(cUnit, kThumb2Vcmpd, S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ } else {
+ rlSrc1 = oatGetSrc(cUnit, mir, 0);
+ rlSrc2 = oatGetSrc(cUnit, mir, 1);
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ newLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ }
+ newLIR0(cUnit, kThumb2Fmstat);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ switch(ccode) {
+ case kCondEq:
+ case kCondNe:
+ break;
+ case kCondLt:
+ if (gtBias) {
+ ccode = kCondMi;
+ }
+ break;
+ case kCondLe:
+ if (gtBias) {
+ ccode = kCondLs;
+ }
+ break;
+ case kCondGt:
+ if (gtBias) {
+ ccode = kCondHi;
+ }
+ break;
+ case kCondGe:
+ if (gtBias) {
+ ccode = kCondCs;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
+ }
+ opCondBranch(cUnit, ccode, target);
}
bool genCmpFP(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- bool isDouble;
- int defaultResult;
- RegLocation rlResult;
+ bool isDouble;
+ int defaultResult;
+ RegLocation rlResult;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::CMPL_FLOAT:
- isDouble = false;
- defaultResult = -1;
- break;
- case Instruction::CMPG_FLOAT:
- isDouble = false;
- defaultResult = 1;
- break;
- case Instruction::CMPL_DOUBLE:
- isDouble = true;
- defaultResult = -1;
- break;
- case Instruction::CMPG_DOUBLE:
- isDouble = true;
- defaultResult = 1;
- break;
- default:
- return true;
- }
- if (isDouble) {
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
- oatClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadConstant(cUnit, rlResult.lowReg, defaultResult);
-        newLIR2(cUnit, kThumb2Vcmpd, S2D(rlSrc1.lowReg, rlSrc1.highReg),
- S2D(rlSrc2.lowReg, rlSrc2.highReg));
- } else {
- rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
- oatClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- loadConstant(cUnit, rlResult.lowReg, defaultResult);
- newLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
- }
- DCHECK(!FPREG(rlResult.lowReg));
- newLIR0(cUnit, kThumb2Fmstat);
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::CMPL_FLOAT:
+ isDouble = false;
+ defaultResult = -1;
+ break;
+ case Instruction::CMPG_FLOAT:
+ isDouble = false;
+ defaultResult = 1;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ isDouble = true;
+ defaultResult = -1;
+ break;
+ case Instruction::CMPG_DOUBLE:
+ isDouble = true;
+ defaultResult = 1;
+ break;
+ default:
+ return true;
+ }
+ if (isDouble) {
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+ oatClobberSReg(cUnit, rlDest.sRegLow);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadConstant(cUnit, rlResult.lowReg, defaultResult);
+    newLIR2(cUnit, kThumb2Vcmpd, S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ } else {
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ oatClobberSReg(cUnit, rlDest.sRegLow);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadConstant(cUnit, rlResult.lowReg, defaultResult);
+ newLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ }
+ DCHECK(!FPREG(rlResult.lowReg));
+ newLIR0(cUnit, kThumb2Fmstat);
- opIT(cUnit, (defaultResult == -1) ? kArmCondGt : kArmCondMi, "");
- newLIR2(cUnit, kThumb2MovImmShift, rlResult.lowReg,
- modifiedImmediate(-defaultResult)); // Must not alter ccodes
- genBarrier(cUnit);
+ opIT(cUnit, (defaultResult == -1) ? kArmCondGt : kArmCondMi, "");
+ newLIR2(cUnit, kThumb2MovImmShift, rlResult.lowReg,
+ modifiedImmediate(-defaultResult)); // Must not alter ccodes
+ genBarrier(cUnit);
- opIT(cUnit, kArmCondEq, "");
- loadConstant(cUnit, rlResult.lowReg, 0);
- genBarrier(cUnit);
+ opIT(cUnit, kArmCondEq, "");
+ loadConstant(cUnit, rlResult.lowReg, 0);
+ genBarrier(cUnit);
- storeValue(cUnit, rlDest, rlResult);
- return false;
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
}
} // namespace art
diff --git a/src/compiler/codegen/arm/Thumb2/Factory.cc b/src/compiler/codegen/arm/Thumb2/Factory.cc
index 0fe937a..533f8b4 100644
--- a/src/compiler/codegen/arm/Thumb2/Factory.cc
+++ b/src/compiler/codegen/arm/Thumb2/Factory.cc
@@ -37,62 +37,62 @@
int encodeImmSingle(int value)
{
- int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3e000000) >> 25;
- int slice = (value & 0x01f80000) >> 19;
- int zeroes = (value & 0x0007ffff);
- if (zeroes != 0)
- return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0x1f))
- return -1;
- } else {
- if ((notBitB != 1) || (bSmear != 0x0))
- return -1;
- }
- res = (bitA << 7) | (bitB << 6) | slice;
- return res;
+ int res;
+ int bitA = (value & 0x80000000) >> 31;
+ int notBitB = (value & 0x40000000) >> 30;
+ int bitB = (value & 0x20000000) >> 29;
+ int bSmear = (value & 0x3e000000) >> 25;
+ int slice = (value & 0x01f80000) >> 19;
+ int zeroes = (value & 0x0007ffff);
+ if (zeroes != 0)
+ return -1;
+ if (bitB) {
+ if ((notBitB != 0) || (bSmear != 0x1f))
+ return -1;
+ } else {
+ if ((notBitB != 1) || (bSmear != 0x0))
+ return -1;
+ }
+ res = (bitA << 7) | (bitB << 6) | slice;
+ return res;
}
LIR* loadFPConstantValue(CompilationUnit* cUnit, int rDest, int value)
{
- int encodedImm = encodeImmSingle(value);
- DCHECK(SINGLEREG(rDest));
- if (encodedImm >= 0) {
- return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
- }
- LIR* dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, &cUnit->literalList, value);
- }
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrs,
- rDest, r15pc, 0, 0, 0, dataTarget);
- setMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = (intptr_t)dataTarget;
- oatAppendLIR(cUnit, (LIR* ) loadPcRel);
- return loadPcRel;
+ int encodedImm = encodeImmSingle(value);
+ DCHECK(SINGLEREG(rDest));
+ if (encodedImm >= 0) {
+ return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
+ }
+ LIR* dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
+ if (dataTarget == NULL) {
+ dataTarget = addWordData(cUnit, &cUnit->literalList, value);
+ }
+ LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrs,
+ rDest, r15pc, 0, 0, 0, dataTarget);
+ setMemRefType(loadPcRel, true, kLiteral);
+ loadPcRel->aliasInfo = (intptr_t)dataTarget;
+ oatAppendLIR(cUnit, (LIR* ) loadPcRel);
+ return loadPcRel;
}
int leadingZeros(u4 val)
{
- u4 alt;
- int n;
- int count;
+ u4 alt;
+ int n;
+ int count;
- count = 16;
- n = 32;
- do {
- alt = val >> count;
- if (alt != 0) {
- n = n - count;
- val = alt;
- }
- count >>= 1;
- } while (count);
- return n - val;
+ count = 16;
+ n = 32;
+ do {
+ alt = val >> count;
+ if (alt != 0) {
+ n = n - count;
+ val = alt;
+ }
+ count >>= 1;
+ } while (count);
+ return n - val;
}
/*
@@ -107,20 +107,20 @@
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
if (value <= 0xFF)
- return b0; // 0:000:a:bcdefgh
+ return b0; // 0:000:a:bcdefgh
if (value == ((b0 << 16) | b0))
- return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
+ return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
- return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
+ return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
b0 = (value >> 8) & 0xff;
if (value == ((b0 << 24) | (b0 << 8)))
- return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
+ return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
/* Can we do it with rotation? */
zLeading = leadingZeros(value);
zTrailing = 32 - leadingZeros(~value & (value - 1));
/* A run of eight or fewer active bits? */
if ((zLeading + zTrailing) < 24)
- return -1; /* No - bail */
+ return -1; /* No - bail */
/* left-justify the constant, discarding msb (known to be 1) */
value <<= zLeading + 1;
/* Create bcdefgh */
@@ -139,443 +139,441 @@
*/
LIR* loadConstantNoClobber(CompilationUnit* cUnit, int rDest, int value)
{
- LIR* res;
- int modImm;
+ LIR* res;
+ int modImm;
- if (FPREG(rDest)) {
- return loadFPConstantValue(cUnit, rDest, value);
- }
+ if (FPREG(rDest)) {
+ return loadFPConstantValue(cUnit, rDest, value);
+ }
- /* See if the value can be constructed cheaply */
- if (LOWREG(rDest) && (value >= 0) && (value <= 255)) {
- return newLIR2(cUnit, kThumbMovImm, rDest, value);
- }
- /* Check Modified immediate special cases */
- modImm = modifiedImmediate(value);
- if (modImm >= 0) {
- res = newLIR2(cUnit, kThumb2MovImmShift, rDest, modImm);
- return res;
- }
- modImm = modifiedImmediate(~value);
- if (modImm >= 0) {
- res = newLIR2(cUnit, kThumb2MvnImm12, rDest, modImm);
- return res;
- }
- /* 16-bit immediate? */
- if ((value & 0xffff) == value) {
- res = newLIR2(cUnit, kThumb2MovImm16, rDest, value);
- return res;
- }
- /* No shortcut - go ahead and use literal pool */
- LIR* dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = addWordData(cUnit, &cUnit->literalList, value);
- }
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rDest, 0, 0, 0, 0, dataTarget);
- setMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = (intptr_t)dataTarget;
- res = loadPcRel;
- oatAppendLIR(cUnit, (LIR* ) loadPcRel);
-
- /*
- * To save space in the constant pool, we use the ADD_RRI8 instruction to
- * add up to 255 to an existing constant value.
- */
- if (dataTarget->operands[0] != value) {
- opRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]);
- }
+ /* See if the value can be constructed cheaply */
+ if (LOWREG(rDest) && (value >= 0) && (value <= 255)) {
+ return newLIR2(cUnit, kThumbMovImm, rDest, value);
+ }
+ /* Check Modified immediate special cases */
+ modImm = modifiedImmediate(value);
+ if (modImm >= 0) {
+ res = newLIR2(cUnit, kThumb2MovImmShift, rDest, modImm);
return res;
+ }
+ modImm = modifiedImmediate(~value);
+ if (modImm >= 0) {
+ res = newLIR2(cUnit, kThumb2MvnImm12, rDest, modImm);
+ return res;
+ }
+ /* 16-bit immediate? */
+ if ((value & 0xffff) == value) {
+ res = newLIR2(cUnit, kThumb2MovImm16, rDest, value);
+ return res;
+ }
+ /* No shortcut - go ahead and use literal pool */
+ LIR* dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
+ if (dataTarget == NULL) {
+ dataTarget = addWordData(cUnit, &cUnit->literalList, value);
+ }
+ LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
+ kThumb2LdrPcRel12, rDest, 0, 0, 0, 0, dataTarget);
+ setMemRefType(loadPcRel, true, kLiteral);
+ loadPcRel->aliasInfo = (intptr_t)dataTarget;
+ res = loadPcRel;
+ oatAppendLIR(cUnit, (LIR* ) loadPcRel);
+
+ /*
+ * To save space in the constant pool, we use the ADD_RRI8 instruction to
+ * add up to 255 to an existing constant value.
+ */
+ if (dataTarget->operands[0] != value) {
+ opRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]);
+ }
+ return res;
}
LIR* opBranchUnconditional(CompilationUnit* cUnit, OpKind op)
{
- DCHECK_EQ(op, kOpUncondBr);
- return newLIR1(cUnit, kThumbBUncond, 0 /* offset to be patched */);
+ DCHECK_EQ(op, kOpUncondBr);
+ return newLIR1(cUnit, kThumbBUncond, 0 /* offset to be patched */);
}
LIR* opCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
{
- LIR* branch = newLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */,
- oatArmConditionEncoding(cc));
- branch->target = target;
- return branch;
+ LIR* branch = newLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */,
+ oatArmConditionEncoding(cc));
+ branch->target = target;
+ return branch;
}
LIR* opReg(CompilationUnit* cUnit, OpKind op, int rDestSrc)
{
- ArmOpcode opcode = kThumbBkpt;
- switch (op) {
- case kOpBlx:
- opcode = kThumbBlxR;
- break;
- default:
- LOG(FATAL) << "Bad opcode " << (int)op;
- }
- return newLIR1(cUnit, opcode, rDestSrc);
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpBlx:
+ opcode = kThumbBlxR;
+ break;
+ default:
+ LOG(FATAL) << "Bad opcode " << (int)op;
+ }
+ return newLIR1(cUnit, opcode, rDestSrc);
}
LIR* opRegRegShift(CompilationUnit* cUnit, OpKind op, int rDestSrc1,
int rSrc2, int shift)
{
- bool thumbForm = ((shift == 0) && LOWREG(rDestSrc1) && LOWREG(rSrc2));
- ArmOpcode opcode = kThumbBkpt;
- switch (op) {
- case kOpAdc:
- opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR;
- break;
- case kOpAnd:
- opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR;
- break;
- case kOpBic:
- opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR;
- break;
- case kOpCmn:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR;
- break;
- case kOpCmp:
- if (thumbForm)
- opcode = kThumbCmpRR;
- else if ((shift == 0) && !LOWREG(rDestSrc1) && !LOWREG(rSrc2))
- opcode = kThumbCmpHH;
- else if ((shift == 0) && LOWREG(rDestSrc1))
- opcode = kThumbCmpLH;
- else if (shift == 0)
- opcode = kThumbCmpHL;
- else
- opcode = kThumb2CmpRR;
- break;
- case kOpXor:
- opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR;
- break;
- case kOpMov:
- DCHECK_EQ(shift, 0);
- if (LOWREG(rDestSrc1) && LOWREG(rSrc2))
- opcode = kThumbMovRR;
- else if (!LOWREG(rDestSrc1) && !LOWREG(rSrc2))
- opcode = kThumbMovRR_H2H;
- else if (LOWREG(rDestSrc1))
- opcode = kThumbMovRR_H2L;
- else
- opcode = kThumbMovRR_L2H;
- break;
- case kOpMul:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbMul : kThumb2MulRRR;
- break;
- case kOpMvn:
- opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR;
- break;
- case kOpNeg:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR;
- break;
- case kOpOr:
- opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR;
- break;
- case kOpSbc:
- opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR;
- break;
- case kOpTst:
- opcode = (thumbForm) ? kThumbTst : kThumb2TstRR;
- break;
- case kOpLsl:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR;
- break;
- case kOpLsr:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR;
- break;
- case kOpAsr:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR;
- break;
- case kOpRor:
- DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR;
- break;
- case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
- break;
- case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
- break;
- case kOp2Byte:
- DCHECK_EQ(shift, 0);
- return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8);
- case kOp2Short:
- DCHECK_EQ(shift, 0);
- return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16);
- case kOp2Char:
- DCHECK_EQ(shift, 0);
- return newLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16);
- default:
- LOG(FATAL) << "Bad opcode: " << (int)op;
- break;
- }
- DCHECK_GE(static_cast<int>(opcode), 0);
- if (EncodingMap[opcode].flags & IS_BINARY_OP)
- return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
- else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
- if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift)
- return newLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift);
- else
- return newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2);
- } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return newLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift);
- else {
- LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
- }
+ bool thumbForm = ((shift == 0) && LOWREG(rDestSrc1) && LOWREG(rSrc2));
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpAdc:
+ opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR;
+ break;
+ case kOpAnd:
+ opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR;
+ break;
+ case kOpBic:
+ opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR;
+ break;
+ case kOpCmn:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR;
+ break;
+ case kOpCmp:
+ if (thumbForm)
+ opcode = kThumbCmpRR;
+ else if ((shift == 0) && !LOWREG(rDestSrc1) && !LOWREG(rSrc2))
+ opcode = kThumbCmpHH;
+ else if ((shift == 0) && LOWREG(rDestSrc1))
+ opcode = kThumbCmpLH;
+ else if (shift == 0)
+ opcode = kThumbCmpHL;
+ else
+ opcode = kThumb2CmpRR;
+ break;
+ case kOpXor:
+ opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR;
+ break;
+ case kOpMov:
+ DCHECK_EQ(shift, 0);
+ if (LOWREG(rDestSrc1) && LOWREG(rSrc2))
+ opcode = kThumbMovRR;
+ else if (!LOWREG(rDestSrc1) && !LOWREG(rSrc2))
+ opcode = kThumbMovRR_H2H;
+ else if (LOWREG(rDestSrc1))
+ opcode = kThumbMovRR_H2L;
+ else
+ opcode = kThumbMovRR_L2H;
+ break;
+ case kOpMul:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbMul : kThumb2MulRRR;
+ break;
+ case kOpMvn:
+ opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR;
+ break;
+ case kOpNeg:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR;
+ break;
+ case kOpOr:
+ opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR;
+ break;
+ case kOpSbc:
+ opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR;
+ break;
+ case kOpTst:
+ opcode = (thumbForm) ? kThumbTst : kThumb2TstRR;
+ break;
+ case kOpLsl:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR;
+ break;
+ case kOpLsr:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR;
+ break;
+ case kOpAsr:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR;
+ break;
+ case kOpRor:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR;
+ break;
+ case kOpAdd:
+ opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ break;
+ case kOpSub:
+ opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ break;
+ case kOp2Byte:
+ DCHECK_EQ(shift, 0);
+ return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8);
+ case kOp2Short:
+ DCHECK_EQ(shift, 0);
+ return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16);
+ case kOp2Char:
+ DCHECK_EQ(shift, 0);
+ return newLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16);
+ default:
+ LOG(FATAL) << "Bad opcode: " << (int)op;
+ break;
+ }
+ DCHECK_GE(static_cast<int>(opcode), 0);
+ if (EncodingMap[opcode].flags & IS_BINARY_OP)
+ return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
+ if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift)
+ return newLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift);
+ else
+ return newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2);
+ } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
+ return newLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift);
+ else {
+ LOG(FATAL) << "Unexpected encoding operand count";
+ return NULL;
+ }
}
LIR* opRegReg(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2)
{
- return opRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0);
+ return opRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0);
}
LIR* opRegRegRegShift(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2, int shift)
+ int rSrc2, int shift)
{
- ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (shift == 0) && LOWREG(rDest) && LOWREG(rSrc1) &&
- LOWREG(rSrc2);
- switch (op) {
- case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
- break;
- case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
- break;
- case kOpRsub:
- opcode = kThumb2RsubRRR;
- break;
- case kOpAdc:
- opcode = kThumb2AdcRRR;
- break;
- case kOpAnd:
- opcode = kThumb2AndRRR;
- break;
- case kOpBic:
- opcode = kThumb2BicRRR;
- break;
- case kOpXor:
- opcode = kThumb2EorRRR;
- break;
- case kOpMul:
- DCHECK_EQ(shift, 0);
- opcode = kThumb2MulRRR;
- break;
- case kOpOr:
- opcode = kThumb2OrrRRR;
- break;
- case kOpSbc:
- opcode = kThumb2SbcRRR;
- break;
- case kOpLsl:
- DCHECK_EQ(shift, 0);
- opcode = kThumb2LslRRR;
- break;
- case kOpLsr:
- DCHECK_EQ(shift, 0);
- opcode = kThumb2LsrRRR;
- break;
- case kOpAsr:
- DCHECK_EQ(shift, 0);
- opcode = kThumb2AsrRRR;
- break;
- case kOpRor:
- DCHECK_EQ(shift, 0);
- opcode = kThumb2RorRRR;
- break;
- default:
- LOG(FATAL) << "Bad opcode: " << (int)op;
- break;
- }
- DCHECK_GE(static_cast<int>(opcode), 0);
- if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return newLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift);
- else {
- DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
- return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
- }
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumbForm = (shift == 0) && LOWREG(rDest) && LOWREG(rSrc1) &&
+ LOWREG(rSrc2);
+ switch (op) {
+ case kOpAdd:
+ opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ break;
+ case kOpSub:
+ opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ break;
+ case kOpRsub:
+ opcode = kThumb2RsubRRR;
+ break;
+ case kOpAdc:
+ opcode = kThumb2AdcRRR;
+ break;
+ case kOpAnd:
+ opcode = kThumb2AndRRR;
+ break;
+ case kOpBic:
+ opcode = kThumb2BicRRR;
+ break;
+ case kOpXor:
+ opcode = kThumb2EorRRR;
+ break;
+ case kOpMul:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2MulRRR;
+ break;
+ case kOpOr:
+ opcode = kThumb2OrrRRR;
+ break;
+ case kOpSbc:
+ opcode = kThumb2SbcRRR;
+ break;
+ case kOpLsl:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2LslRRR;
+ break;
+ case kOpLsr:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2LsrRRR;
+ break;
+ case kOpAsr:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2AsrRRR;
+ break;
+ case kOpRor:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2RorRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << (int)op;
+ break;
+ }
+ DCHECK_GE(static_cast<int>(opcode), 0);
+ if (EncodingMap[opcode].flags & IS_QUAD_OP)
+ return newLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift);
+ else {
+ DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
+ return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+ }
}
LIR* opRegRegReg(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
int rSrc2)
{
- return opRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0);
+ return opRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0);
}
LIR* opRegRegImm(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
int value)
{
- LIR* res;
- bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- ArmOpcode opcode = kThumbBkpt;
- ArmOpcode altOpcode = kThumbBkpt;
- bool allLowRegs = (LOWREG(rDest) && LOWREG(rSrc1));
- int modImm = modifiedImmediate(value);
- int modImmNeg = modifiedImmediate(-value);
+ LIR* res;
+ bool neg = (value < 0);
+ int absValue = (neg) ? -value : value;
+ ArmOpcode opcode = kThumbBkpt;
+ ArmOpcode altOpcode = kThumbBkpt;
+ bool allLowRegs = (LOWREG(rDest) && LOWREG(rSrc1));
+ int modImm = modifiedImmediate(value);
+ int modImmNeg = modifiedImmediate(-value);
- switch (op) {
- case kOpLsl:
- if (allLowRegs)
- return newLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value);
- else
- return newLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value);
- case kOpLsr:
- if (allLowRegs)
- return newLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value);
- else
- return newLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value);
- case kOpAsr:
- if (allLowRegs)
- return newLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value);
- else
- return newLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value);
- case kOpRor:
- return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
- case kOpAdd:
- if (LOWREG(rDest) && (rSrc1 == r13sp) &&
- (value <= 1020) && ((value & 0x3)==0)) {
- return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1,
- value >> 2);
- } else if (LOWREG(rDest) && (rSrc1 == r15pc) &&
- (value <= 1020) && ((value & 0x3)==0)) {
- return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1,
- value >> 2);
- }
- // Note: intentional fallthrough
- case kOpSub:
- if (allLowRegs && ((absValue & 0x7) == absValue)) {
- if (op == kOpAdd)
- opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
- else
- opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
- return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
- } else if ((absValue & 0xff) == absValue) {
- if (op == kOpAdd)
- opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
- else
- opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
- }
- if (modImmNeg >= 0) {
- op = (op == kOpAdd) ? kOpSub : kOpAdd;
- modImm = modImmNeg;
- }
- if (op == kOpSub) {
- opcode = kThumb2SubRRI8;
- altOpcode = kThumb2SubRRR;
- } else {
- opcode = kThumb2AddRRI8;
- altOpcode = kThumb2AddRRR;
- }
- break;
- case kOpAdc:
- opcode = kThumb2AdcRRI8;
- altOpcode = kThumb2AdcRRR;
- break;
- case kOpSbc:
- opcode = kThumb2SbcRRI8;
- altOpcode = kThumb2SbcRRR;
- break;
- case kOpOr:
- opcode = kThumb2OrrRRI8;
- altOpcode = kThumb2OrrRRR;
- break;
- case kOpAnd:
- opcode = kThumb2AndRRI8;
- altOpcode = kThumb2AndRRR;
- break;
- case kOpXor:
- opcode = kThumb2EorRRI8;
- altOpcode = kThumb2EorRRR;
- break;
- case kOpMul:
- //TUNING: power of 2, shift & add
- modImm = -1;
- altOpcode = kThumb2MulRRR;
- break;
- case kOpCmp: {
- int modImm = modifiedImmediate(value);
- LIR* res;
- if (modImm >= 0) {
- res = newLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm);
- } else {
- int rTmp = oatAllocTemp(cUnit);
- res = loadConstant(cUnit, rTmp, value);
- opRegReg(cUnit, kOpCmp, rSrc1, rTmp);
- oatFreeTemp(cUnit, rTmp);
- }
- return res;
- }
- default:
- LOG(FATAL) << "Bad opcode: " << (int)op;
- }
-
- if (modImm >= 0) {
- return newLIR3(cUnit, opcode, rDest, rSrc1, modImm);
- } else {
- int rScratch = oatAllocTemp(cUnit);
- loadConstant(cUnit, rScratch, value);
- if (EncodingMap[altOpcode].flags & IS_QUAD_OP)
- res = newLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0);
+ switch (op) {
+ case kOpLsl:
+ if (allLowRegs)
+ return newLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value);
+ else
+ return newLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value);
+ case kOpLsr:
+ if (allLowRegs)
+ return newLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value);
+ else
+ return newLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value);
+ case kOpAsr:
+ if (allLowRegs)
+ return newLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value);
+ else
+ return newLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value);
+ case kOpRor:
+ return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
+ case kOpAdd:
+ if (LOWREG(rDest) && (rSrc1 == r13sp) &&
+ (value <= 1020) && ((value & 0x3)==0)) {
+ return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1, value >> 2);
+ } else if (LOWREG(rDest) && (rSrc1 == r15pc) &&
+ (value <= 1020) && ((value & 0x3)==0)) {
+ return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1, value >> 2);
+ }
+ // Note: intentional fallthrough
+ case kOpSub:
+ if (allLowRegs && ((absValue & 0x7) == absValue)) {
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
else
- res = newLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch);
- oatFreeTemp(cUnit, rScratch);
- return res;
+ opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
+ return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
+ } else if ((absValue & 0xff) == absValue) {
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
+ else
+ opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
+ return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
+ }
+ if (modImmNeg >= 0) {
+ op = (op == kOpAdd) ? kOpSub : kOpAdd;
+ modImm = modImmNeg;
+ }
+ if (op == kOpSub) {
+ opcode = kThumb2SubRRI8;
+ altOpcode = kThumb2SubRRR;
+ } else {
+ opcode = kThumb2AddRRI8;
+ altOpcode = kThumb2AddRRR;
+ }
+ break;
+ case kOpAdc:
+ opcode = kThumb2AdcRRI8;
+ altOpcode = kThumb2AdcRRR;
+ break;
+ case kOpSbc:
+ opcode = kThumb2SbcRRI8;
+ altOpcode = kThumb2SbcRRR;
+ break;
+ case kOpOr:
+ opcode = kThumb2OrrRRI8;
+ altOpcode = kThumb2OrrRRR;
+ break;
+ case kOpAnd:
+ opcode = kThumb2AndRRI8;
+ altOpcode = kThumb2AndRRR;
+ break;
+ case kOpXor:
+ opcode = kThumb2EorRRI8;
+ altOpcode = kThumb2EorRRR;
+ break;
+ case kOpMul:
+ //TUNING: power of 2, shift & add
+ modImm = -1;
+ altOpcode = kThumb2MulRRR;
+ break;
+ case kOpCmp: {
+ int modImm = modifiedImmediate(value);
+ LIR* res;
+ if (modImm >= 0) {
+ res = newLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm);
+ } else {
+ int rTmp = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rTmp, value);
+ opRegReg(cUnit, kOpCmp, rSrc1, rTmp);
+ oatFreeTemp(cUnit, rTmp);
+ }
+ return res;
}
+ default:
+ LOG(FATAL) << "Bad opcode: " << (int)op;
+ }
+
+ if (modImm >= 0) {
+ return newLIR3(cUnit, opcode, rDest, rSrc1, modImm);
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ loadConstant(cUnit, rScratch, value);
+ if (EncodingMap[altOpcode].flags & IS_QUAD_OP)
+ res = newLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0);
+ else
+ res = newLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch);
+ oatFreeTemp(cUnit, rScratch);
+ return res;
+ }
}
/* Handle Thumb-only variants here - otherwise punt to opRegRegImm */
LIR* opRegImm(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int value)
{
- bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- bool shortForm = (((absValue & 0xff) == absValue) && LOWREG(rDestSrc1));
- ArmOpcode opcode = kThumbBkpt;
- switch (op) {
- case kOpAdd:
- if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
- DCHECK_EQ((value & 0x3), 0);
- return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
- } else if (shortForm) {
- opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
- }
- break;
- case kOpSub:
- if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
- DCHECK_EQ((value & 0x3), 0);
- return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
- } else if (shortForm) {
- opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
- }
- break;
- case kOpCmp:
- if (LOWREG(rDestSrc1) && shortForm)
- opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR;
- else if (LOWREG(rDestSrc1))
- opcode = kThumbCmpRR;
- else {
- shortForm = false;
- opcode = kThumbCmpHL;
- }
- break;
- default:
- /* Punt to opRegRegImm - if bad case catch it there */
- shortForm = false;
- break;
- }
- if (shortForm)
- return newLIR2(cUnit, opcode, rDestSrc1, absValue);
- else {
- return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
- }
+ bool neg = (value < 0);
+ int absValue = (neg) ? -value : value;
+ bool shortForm = (((absValue & 0xff) == absValue) && LOWREG(rDestSrc1));
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpAdd:
+ if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ DCHECK_EQ((value & 0x3), 0);
+ return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
+ } else if (shortForm) {
+ opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
+ }
+ break;
+ case kOpSub:
+ if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ DCHECK_EQ((value & 0x3), 0);
+ return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
+ } else if (shortForm) {
+ opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
+ }
+ break;
+ case kOpCmp:
+ if (LOWREG(rDestSrc1) && shortForm)
+ opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR;
+ else if (LOWREG(rDestSrc1))
+ opcode = kThumbCmpRR;
+ else {
+ shortForm = false;
+ opcode = kThumbCmpHL;
+ }
+ break;
+ default:
+ /* Punt to opRegRegImm - if bad case catch it there */
+ shortForm = false;
+ break;
+ }
+ if (shortForm)
+ return newLIR2(cUnit, opcode, rDestSrc1, absValue);
+ else {
+ return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ }
}
/*
@@ -584,193 +582,191 @@
*/
int encodeImmDoubleHigh(int value)
{
- int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3fc00000) >> 22;
- int slice = (value & 0x003f0000) >> 16;
- int zeroes = (value & 0x0000ffff);
- if (zeroes != 0)
- return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0xff))
- return -1;
- } else {
- if ((notBitB != 1) || (bSmear != 0x0))
- return -1;
- }
- res = (bitA << 7) | (bitB << 6) | slice;
- return res;
+ int res;
+ int bitA = (value & 0x80000000) >> 31;
+ int notBitB = (value & 0x40000000) >> 30;
+ int bitB = (value & 0x20000000) >> 29;
+ int bSmear = (value & 0x3fc00000) >> 22;
+ int slice = (value & 0x003f0000) >> 16;
+ int zeroes = (value & 0x0000ffff);
+ if (zeroes != 0)
+ return -1;
+ if (bitB) {
+ if ((notBitB != 0) || (bSmear != 0xff))
+ return -1;
+ } else {
+ if ((notBitB != 1) || (bSmear != 0x0))
+ return -1;
+ }
+ res = (bitA << 7) | (bitB << 6) | slice;
+ return res;
}
int encodeImmDouble(int valLo, int valHi)
{
- int res = -1;
- if (valLo == 0)
- res = encodeImmDoubleHigh(valHi);
- return res;
+ int res = -1;
+ if (valLo == 0)
+ res = encodeImmDoubleHigh(valHi);
+ return res;
}
LIR* loadConstantValueWide(CompilationUnit* cUnit, int rDestLo, int rDestHi,
- int valLo, int valHi)
+ int valLo, int valHi)
{
- int encodedImm = encodeImmDouble(valLo, valHi);
- LIR* res;
- if (FPREG(rDestLo)) {
- if (encodedImm >= 0) {
- res = newLIR2(cUnit, kThumb2Vmovd_IMM8, S2D(rDestLo, rDestHi),
- encodedImm);
- } else {
- LIR* dataTarget = scanLiteralPoolWide(cUnit->literalList, valLo,
- valHi);
- if (dataTarget == NULL) {
- dataTarget = addWideData(cUnit, &cUnit->literalList, valLo,
- valHi);
- }
- LIR* loadPcRel = rawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2Vldrd, S2D(rDestLo, rDestHi),
- r15pc, 0, 0, 0, dataTarget);
- setMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = (intptr_t)dataTarget;
- oatAppendLIR(cUnit, (LIR* ) loadPcRel);
- res = loadPcRel;
- }
+ int encodedImm = encodeImmDouble(valLo, valHi);
+ LIR* res;
+ if (FPREG(rDestLo)) {
+ if (encodedImm >= 0) {
+ res = newLIR2(cUnit, kThumb2Vmovd_IMM8, S2D(rDestLo, rDestHi),
+ encodedImm);
} else {
- res = loadConstantNoClobber(cUnit, rDestLo, valLo);
- loadConstantNoClobber(cUnit, rDestHi, valHi);
+ LIR* dataTarget = scanLiteralPoolWide(cUnit->literalList, valLo, valHi);
+ if (dataTarget == NULL) {
+ dataTarget = addWideData(cUnit, &cUnit->literalList, valLo, valHi);
+ }
+ LIR* loadPcRel =
+ rawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrd,
+ S2D(rDestLo, rDestHi), r15pc, 0, 0, 0, dataTarget);
+ setMemRefType(loadPcRel, true, kLiteral);
+ loadPcRel->aliasInfo = (intptr_t)dataTarget;
+ oatAppendLIR(cUnit, (LIR* ) loadPcRel);
+ res = loadPcRel;
}
- return res;
+ } else {
+ res = loadConstantNoClobber(cUnit, rDestLo, valLo);
+ loadConstantNoClobber(cUnit, rDestHi, valHi);
+ }
+ return res;
}
int encodeShift(int code, int amount) {
- return ((amount & 0x1f) << 2) | code;
+ return ((amount & 0x1f) << 2) | code;
}
LIR* loadBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rDest,
int scale, OpSize size)
{
- bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rDest);
- LIR* load;
- ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rDest);
+ LIR* load;
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumbForm = (allLowRegs && (scale == 0));
+ int regPtr;
- if (FPREG(rDest)) {
- if (SINGLEREG(rDest)) {
- DCHECK((size == kWord) || (size == kSingle));
- opcode = kThumb2Vldrs;
- size = kSingle;
- } else {
- DCHECK(DOUBLEREG(rDest));
- DCHECK((size == kLong) || (size == kDouble));
- DCHECK((rDest & 0x1) == 0);
- opcode = kThumb2Vldrd;
- size = kDouble;
- }
+ if (FPREG(rDest)) {
+ if (SINGLEREG(rDest)) {
+ DCHECK((size == kWord) || (size == kSingle));
+ opcode = kThumb2Vldrs;
+ size = kSingle;
} else {
- if (size == kSingle)
- size = kWord;
+ DCHECK(DOUBLEREG(rDest));
+ DCHECK((size == kLong) || (size == kDouble));
+ DCHECK((rDest & 0x1) == 0);
+ opcode = kThumb2Vldrd;
+ size = kDouble;
}
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
- switch (size) {
- case kDouble: // fall-through
- case kSingle:
- regPtr = oatAllocTemp(cUnit);
- if (scale) {
- newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
- encodeShift(kArmLsl, scale));
- } else {
- opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
- }
- load = newLIR3(cUnit, opcode, rDest, regPtr, 0);
- oatFreeTemp(cUnit, regPtr);
- return load;
- case kWord:
- opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR;
- break;
- case kUnsignedHalf:
- opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR;
- break;
- case kSignedHalf:
- opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR;
- break;
- case kUnsignedByte:
- opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR;
- break;
- case kSignedByte:
- opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
- break;
- default:
- LOG(FATAL) << "Bad size: " << (int)size;
- }
- if (thumbForm)
- load = newLIR3(cUnit, opcode, rDest, rBase, rIndex);
- else
- load = newLIR4(cUnit, opcode, rDest, rBase, rIndex, scale);
+ switch (size) {
+ case kDouble: // fall-through
+ case kSingle:
+ regPtr = oatAllocTemp(cUnit);
+ if (scale) {
+ newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ encodeShift(kArmLsl, scale));
+ } else {
+ opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ }
+ load = newLIR3(cUnit, opcode, rDest, regPtr, 0);
+ oatFreeTemp(cUnit, regPtr);
+ return load;
+ case kWord:
+ opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR;
+ break;
+ case kUnsignedHalf:
+ opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR;
+ break;
+ case kSignedHalf:
+ opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR;
+ break;
+ case kUnsignedByte:
+ opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR;
+ break;
+ case kSignedByte:
+ opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << (int)size;
+ }
+ if (thumbForm)
+ load = newLIR3(cUnit, opcode, rDest, rBase, rIndex);
+ else
+ load = newLIR4(cUnit, opcode, rDest, rBase, rIndex, scale);
- return load;
+ return load;
}
LIR* storeBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rSrc,
int scale, OpSize size)
{
- bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rSrc);
- LIR* store;
- ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rSrc);
+ LIR* store;
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumbForm = (allLowRegs && (scale == 0));
+ int regPtr;
- if (FPREG(rSrc)) {
- if (SINGLEREG(rSrc)) {
- DCHECK((size == kWord) || (size == kSingle));
- opcode = kThumb2Vstrs;
- size = kSingle;
- } else {
- DCHECK(DOUBLEREG(rSrc));
- DCHECK((size == kLong) || (size == kDouble));
- DCHECK((rSrc & 0x1) == 0);
- opcode = kThumb2Vstrd;
- size = kDouble;
- }
+ if (FPREG(rSrc)) {
+ if (SINGLEREG(rSrc)) {
+ DCHECK((size == kWord) || (size == kSingle));
+ opcode = kThumb2Vstrs;
+ size = kSingle;
} else {
- if (size == kSingle)
- size = kWord;
+ DCHECK(DOUBLEREG(rSrc));
+ DCHECK((size == kLong) || (size == kDouble));
+ DCHECK((rSrc & 0x1) == 0);
+ opcode = kThumb2Vstrd;
+ size = kDouble;
}
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
- switch (size) {
- case kDouble: // fall-through
- case kSingle:
- regPtr = oatAllocTemp(cUnit);
- if (scale) {
- newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
- encodeShift(kArmLsl, scale));
- } else {
- opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
- }
- store = newLIR3(cUnit, opcode, rSrc, regPtr, 0);
- oatFreeTemp(cUnit, regPtr);
- return store;
- case kWord:
- opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR;
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR;
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR;
- break;
- default:
- LOG(FATAL) << "Bad size: " << (int)size;
- }
- if (thumbForm)
- store = newLIR3(cUnit, opcode, rSrc, rBase, rIndex);
- else
- store = newLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale);
+ switch (size) {
+ case kDouble: // fall-through
+ case kSingle:
+ regPtr = oatAllocTemp(cUnit);
+ if (scale) {
+ newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ encodeShift(kArmLsl, scale));
+ } else {
+ opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ }
+ store = newLIR3(cUnit, opcode, rSrc, regPtr, 0);
+ oatFreeTemp(cUnit, regPtr);
+ return store;
+ case kWord:
+ opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << (int)size;
+ }
+ if (thumbForm)
+ store = newLIR3(cUnit, opcode, rSrc, rBase, rIndex);
+ else
+ store = newLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale);
- return store;
+ return store;
}
/*
@@ -782,267 +778,266 @@
int displacement, int rDest, int rDestHi, OpSize size,
int sReg)
{
- LIR* res;
- LIR* load;
- ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
- bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (LOWREG(rBase) && LOWREG(rDest));
- int encodedDisp = displacement;
- bool is64bit = false;
- switch (size) {
- case kDouble:
- case kLong:
- is64bit = true;
- if (FPREG(rDest)) {
- if (SINGLEREG(rDest)) {
- DCHECK(FPREG(rDestHi));
- rDest = S2D(rDest, rDestHi);
- }
- opcode = kThumb2Vldrd;
- if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
- }
- break;
- } else {
- res = loadBaseDispBody(cUnit, mir, rBase, displacement, rDest,
- -1, kWord, sReg);
- loadBaseDispBody(cUnit, NULL, rBase, displacement + 4, rDestHi,
- -1, kWord, INVALID_SREG);
- return res;
- }
- case kSingle:
- case kWord:
- if (FPREG(rDest)) {
- opcode = kThumb2Vldrs;
- if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
- }
- break;
- }
- if (LOWREG(rDest) && (rBase == r15pc) &&
- (displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
- opcode = kThumbLdrPcRel;
- } else if (LOWREG(rDest) && (rBase == r13sp) &&
- (displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
- opcode = kThumbLdrSpRel;
- } else if (allLowRegs && displacement < 128 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
- opcode = kThumbLdrRRI5;
- } else if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2LdrRRI12;
- }
- break;
- case kUnsignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
- opcode = kThumbLdrhRRI5;
- } else if (displacement < 4092 && displacement >= 0) {
- shortForm = true;
- opcode = kThumb2LdrhRRI12;
- }
- break;
- case kSignedHalf:
- if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2LdrshRRI12;
- }
- break;
- case kUnsignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
- opcode = kThumbLdrbRRI5;
- } else if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2LdrbRRI12;
- }
- break;
- case kSignedByte:
- if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2LdrsbRRI12;
- }
- break;
- default:
- LOG(FATAL) << "Bad size: " << (int)size;
- }
+ LIR* res;
+ LIR* load;
+ ArmOpcode opcode = kThumbBkpt;
+ bool shortForm = false;
+ bool thumb2Form = (displacement < 4092 && displacement >= 0);
+ bool allLowRegs = (LOWREG(rBase) && LOWREG(rDest));
+ int encodedDisp = displacement;
+ bool is64bit = false;
+ switch (size) {
+ case kDouble:
+ case kLong:
+ is64bit = true;
+ if (FPREG(rDest)) {
+ if (SINGLEREG(rDest)) {
+ DCHECK(FPREG(rDestHi));
+ rDest = S2D(rDest, rDestHi);
+ }
+ opcode = kThumb2Vldrd;
+ if (displacement <= 1020) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ }
+ break;
+ } else {
+ res = loadBaseDispBody(cUnit, mir, rBase, displacement, rDest,
+ -1, kWord, sReg);
+ loadBaseDispBody(cUnit, NULL, rBase, displacement + 4, rDestHi,
+ -1, kWord, INVALID_SREG);
+ return res;
+ }
+ case kSingle:
+ case kWord:
+ if (FPREG(rDest)) {
+ opcode = kThumb2Vldrs;
+ if (displacement <= 1020) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ }
+ break;
+ }
+ if (LOWREG(rDest) && (rBase == r15pc) &&
+ (displacement <= 1020) && (displacement >= 0)) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ opcode = kThumbLdrPcRel;
+ } else if (LOWREG(rDest) && (rBase == r13sp) &&
+ (displacement <= 1020) && (displacement >= 0)) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ opcode = kThumbLdrSpRel;
+ } else if (allLowRegs && displacement < 128 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x3), 0);
+ shortForm = true;
+ encodedDisp >>= 2;
+ opcode = kThumbLdrRRI5;
+ } else if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2LdrRRI12;
+ }
+ break;
+ case kUnsignedHalf:
+ if (allLowRegs && displacement < 64 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x1), 0);
+ shortForm = true;
+ encodedDisp >>= 1;
+ opcode = kThumbLdrhRRI5;
+ } else if (displacement < 4092 && displacement >= 0) {
+ shortForm = true;
+ opcode = kThumb2LdrhRRI12;
+ }
+ break;
+ case kSignedHalf:
+ if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2LdrshRRI12;
+ }
+ break;
+ case kUnsignedByte:
+ if (allLowRegs && displacement < 32 && displacement >= 0) {
+ shortForm = true;
+ opcode = kThumbLdrbRRI5;
+ } else if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2LdrbRRI12;
+ }
+ break;
+ case kSignedByte:
+ if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2LdrsbRRI12;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << (int)size;
+ }
- if (shortForm) {
- load = res = newLIR3(cUnit, opcode, rDest, rBase, encodedDisp);
- } else {
- int regOffset = oatAllocTemp(cUnit);
- res = loadConstant(cUnit, regOffset, encodedDisp);
- load = loadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size);
- oatFreeTemp(cUnit, regOffset);
- }
+ if (shortForm) {
+ load = res = newLIR3(cUnit, opcode, rDest, rBase, encodedDisp);
+ } else {
+ int regOffset = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, regOffset, encodedDisp);
+ load = loadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size);
+ oatFreeTemp(cUnit, regOffset);
+ }
- // TODO: in future may need to differentiate Dalvik accesses w/ spills
- if (rBase == rSP) {
- annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */, is64bit);
- }
- return load;
+ // TODO: in future may need to differentiate Dalvik accesses w/ spills
+ if (rBase == rSP) {
+ annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */, is64bit);
+ }
+ return load;
}
LIR* loadBaseDisp(CompilationUnit* cUnit, MIR* mir, int rBase,
int displacement, int rDest, OpSize size, int sReg)
{
- return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
- size, sReg);
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1, size,
+ sReg);
}
LIR* loadBaseDispWide(CompilationUnit* cUnit, MIR* mir, int rBase,
int displacement, int rDestLo, int rDestHi, int sReg)
{
- return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
- kLong, sReg);
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
+ kLong, sReg);
}
LIR* storeBaseDispBody(CompilationUnit* cUnit, int rBase, int displacement,
int rSrc, int rSrcHi, OpSize size)
{
- LIR* res, *store;
- ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
- bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (LOWREG(rBase) && LOWREG(rSrc));
- int encodedDisp = displacement;
- bool is64bit = false;
- switch (size) {
- case kLong:
- case kDouble:
- is64bit = true;
- if (!FPREG(rSrc)) {
- res = storeBaseDispBody(cUnit, rBase, displacement, rSrc,
- -1, kWord);
- storeBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi,
- -1, kWord);
- return res;
- }
- if (SINGLEREG(rSrc)) {
- DCHECK(FPREG(rSrcHi));
- rSrc = S2D(rSrc, rSrcHi);
- }
- opcode = kThumb2Vstrd;
- if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
- }
- break;
- case kSingle:
- case kWord:
- if (FPREG(rSrc)) {
- DCHECK(SINGLEREG(rSrc));
- opcode = kThumb2Vstrs;
- if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
- }
- break;
- }
- if (allLowRegs && displacement < 128 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
- opcode = kThumbStrRRI5;
- } else if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2StrRRI12;
- }
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
- DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
- opcode = kThumbStrhRRI5;
- } else if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2StrhRRI12;
- }
- break;
- case kUnsignedByte:
- case kSignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
- opcode = kThumbStrbRRI5;
- } else if (thumb2Form) {
- shortForm = true;
- opcode = kThumb2StrbRRI12;
- }
- break;
- default:
- LOG(FATAL) << "Bad size: " << (int)size;
- }
- if (shortForm) {
- store = res = newLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
- } else {
- int rScratch = oatAllocTemp(cUnit);
- res = loadConstant(cUnit, rScratch, encodedDisp);
- store = storeBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
- oatFreeTemp(cUnit, rScratch);
- }
+ LIR* res, *store;
+ ArmOpcode opcode = kThumbBkpt;
+ bool shortForm = false;
+ bool thumb2Form = (displacement < 4092 && displacement >= 0);
+ bool allLowRegs = (LOWREG(rBase) && LOWREG(rSrc));
+ int encodedDisp = displacement;
+ bool is64bit = false;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ is64bit = true;
+ if (!FPREG(rSrc)) {
+ res = storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, kWord);
+ storeBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi, -1, kWord);
+ return res;
+ }
+ if (SINGLEREG(rSrc)) {
+ DCHECK(FPREG(rSrcHi));
+ rSrc = S2D(rSrc, rSrcHi);
+ }
+ opcode = kThumb2Vstrd;
+ if (displacement <= 1020) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ }
+ break;
+ case kSingle:
+ case kWord:
+ if (FPREG(rSrc)) {
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kThumb2Vstrs;
+ if (displacement <= 1020) {
+ shortForm = true;
+ encodedDisp >>= 2;
+ }
+ break;
+ }
+ if (allLowRegs && displacement < 128 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x3), 0);
+ shortForm = true;
+ encodedDisp >>= 2;
+ opcode = kThumbStrRRI5;
+ } else if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2StrRRI12;
+ }
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ if (allLowRegs && displacement < 64 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x1), 0);
+ shortForm = true;
+ encodedDisp >>= 1;
+ opcode = kThumbStrhRRI5;
+ } else if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2StrhRRI12;
+ }
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ if (allLowRegs && displacement < 32 && displacement >= 0) {
+ shortForm = true;
+ opcode = kThumbStrbRRI5;
+ } else if (thumb2Form) {
+ shortForm = true;
+ opcode = kThumb2StrbRRI12;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << (int)size;
+ }
+ if (shortForm) {
+ store = res = newLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rScratch, encodedDisp);
+ store = storeBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
+ oatFreeTemp(cUnit, rScratch);
+ }
- // TODO: In future, may need to differentiate Dalvik & spill accesses
- if (rBase == rSP) {
- annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */, is64bit);
- }
- return res;
+ // TODO: In future, may need to differentiate Dalvik & spill accesses
+ if (rBase == rSP) {
+ annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */,
+ is64bit);
+ }
+ return res;
}
LIR* storeBaseDisp(CompilationUnit* cUnit, int rBase, int displacement,
int rSrc, OpSize size)
{
- return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
}
LIR* storeBaseDispWide(CompilationUnit* cUnit, int rBase, int displacement,
int rSrcLo, int rSrcHi)
{
- return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
}
void storePair(CompilationUnit* cUnit, int base, int lowReg, int highReg)
{
- storeBaseDispWide(cUnit, base, 0, lowReg, highReg);
+ storeBaseDispWide(cUnit, base, 0, lowReg, highReg);
}
void loadPair(CompilationUnit* cUnit, int base, int lowReg, int highReg)
{
- loadBaseDispWide(cUnit, NULL, base, 0, lowReg, highReg, INVALID_SREG);
+ loadBaseDispWide(cUnit, NULL, base, 0, lowReg, highReg, INVALID_SREG);
}
LIR* fpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
- int opcode;
- DCHECK_EQ(DOUBLEREG(rDest), DOUBLEREG(rSrc));
- if (DOUBLEREG(rDest)) {
- opcode = kThumb2Vmovd;
+ int opcode;
+ DCHECK_EQ(DOUBLEREG(rDest), DOUBLEREG(rSrc));
+ if (DOUBLEREG(rDest)) {
+ opcode = kThumb2Vmovd;
+ } else {
+ if (SINGLEREG(rDest)) {
+ opcode = SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
} else {
- if (SINGLEREG(rDest)) {
- opcode = SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
- } else {
- DCHECK(SINGLEREG(rSrc));
- opcode = kThumb2Fmrs;
- }
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kThumb2Fmrs;
}
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ }
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
diff --git a/src/compiler/codegen/arm/Thumb2/Gen.cc b/src/compiler/codegen/arm/Thumb2/Gen.cc
index 5451d57..5252b49 100644
--- a/src/compiler/codegen/arm/Thumb2/Gen.cc
+++ b/src/compiler/codegen/arm/Thumb2/Gen.cc
@@ -31,8 +31,8 @@
/* Return the position of an ssa name within the argument list */
int inPosition(CompilationUnit* cUnit, int sReg)
{
- int vReg = SRegToVReg(cUnit, sReg);
- return vReg - cUnit->numRegs;
+ int vReg = SRegToVReg(cUnit, sReg);
+ return vReg - cUnit->numRegs;
}
/*
@@ -42,27 +42,27 @@
*/
RegLocation argLoc(CompilationUnit* cUnit, RegLocation loc)
{
- int argNum = inPosition(cUnit, loc.sRegLow);
- if (loc.wide) {
- if (argNum == 2) {
- // Bad case - half in register, half in frame. Just punt
- loc.location = kLocInvalid;
- } else if (argNum < 2) {
- loc.lowReg = rARG1 + argNum;
- loc.highReg = loc.lowReg + 1;
- loc.location = kLocPhysReg;
- } else {
- loc.location = kLocDalvikFrame;
- }
+ int argNum = inPosition(cUnit, loc.sRegLow);
+ if (loc.wide) {
+ if (argNum == 2) {
+ // Bad case - half in register, half in frame. Just punt
+ loc.location = kLocInvalid;
+ } else if (argNum < 2) {
+ loc.lowReg = rARG1 + argNum;
+ loc.highReg = loc.lowReg + 1;
+ loc.location = kLocPhysReg;
} else {
- if (argNum < 3) {
- loc.lowReg = rARG1 + argNum;
- loc.location = kLocPhysReg;
- } else {
- loc.location = kLocDalvikFrame;
- }
+ loc.location = kLocDalvikFrame;
}
- return loc;
+ } else {
+ if (argNum < 3) {
+ loc.lowReg = rARG1 + argNum;
+ loc.location = kLocPhysReg;
+ } else {
+ loc.location = kLocDalvikFrame;
+ }
+ }
+ return loc;
}
/*
@@ -72,243 +72,241 @@
*/
RegLocation loadArg(CompilationUnit* cUnit, RegLocation loc)
{
- if (loc.location == kLocDalvikFrame) {
- int start = (inPosition(cUnit, loc.sRegLow) + 1) * sizeof(uint32_t);
- loc.lowReg = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rSP, start, loc.lowReg);
- if (loc.wide) {
- loc.highReg = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rSP, start + sizeof(uint32_t), loc.highReg);
- }
- loc.location = kLocPhysReg;
+ if (loc.location == kLocDalvikFrame) {
+ int start = (inPosition(cUnit, loc.sRegLow) + 1) * sizeof(uint32_t);
+ loc.lowReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSP, start, loc.lowReg);
+ if (loc.wide) {
+ loc.highReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSP, start + sizeof(uint32_t), loc.highReg);
}
- return loc;
+ loc.location = kLocPhysReg;
+ }
+ return loc;
}
/* Lock any referenced arguments that arrive in registers */
void lockLiveArgs(CompilationUnit* cUnit, MIR* mir)
{
- int firstIn = cUnit->numRegs;
- const int numArgRegs = 3; // TODO: generalize & move to RegUtil.cc
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
- int vReg = SRegToVReg(cUnit, mir->ssaRep->uses[i]);
- int inPosition = vReg - firstIn;
- if (inPosition < numArgRegs) {
- oatLockTemp(cUnit, rARG1 + inPosition);
- }
+ int firstIn = cUnit->numRegs;
+ const int numArgRegs = 3; // TODO: generalize & move to RegUtil.cc
+ for (int i = 0; i < mir->ssaRep->numUses; i++) {
+ int vReg = SRegToVReg(cUnit, mir->ssaRep->uses[i]);
+ int inPosition = vReg - firstIn;
+ if (inPosition < numArgRegs) {
+ oatLockTemp(cUnit, rARG1 + inPosition);
}
+ }
}
/* Find the next MIR, which may be in a following basic block */
MIR* getNextMir(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir)
{
- BasicBlock* bb = *pBb;
- MIR* origMir = mir;
- while (bb != NULL) {
- if (mir != NULL) {
- mir = mir->next;
- }
- if (mir != NULL) {
- return mir;
- } else {
- bb = bb->fallThrough;
- *pBb = bb;
- if (bb) {
- mir = bb->firstMIRInsn;
- if (mir != NULL) {
- return mir;
- }
- }
- }
+ BasicBlock* bb = *pBb;
+ MIR* origMir = mir;
+ while (bb != NULL) {
+ if (mir != NULL) {
+ mir = mir->next;
}
- return origMir;
+ if (mir != NULL) {
+ return mir;
+ } else {
+ bb = bb->fallThrough;
+ *pBb = bb;
+ if (bb) {
+ mir = bb->firstMIRInsn;
+ if (mir != NULL) {
+ return mir;
+ }
+ }
+ }
+ }
+ return origMir;
}
/* Used for the "printMe" listing */
void genPrintLabel(CompilationUnit *cUnit, MIR* mir)
{
- LIR* boundaryLIR;
- /* Mark the beginning of a Dalvik instruction for line tracking */
- char* instStr = cUnit->printMe ?
- oatGetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
- boundaryLIR = newLIR1(cUnit, kPseudoDalvikByteCodeBoundary,
- (intptr_t) instStr);
- cUnit->boundaryMap.Put(mir->offset, boundaryLIR);
- /* Don't generate the SSA annotation unless verbose mode is on */
- if (cUnit->printMe && mir->ssaRep) {
- char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
- newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
- }
+ LIR* boundaryLIR;
+ /* Mark the beginning of a Dalvik instruction for line tracking */
+ char* instStr = cUnit->printMe ?
+ oatGetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
+ boundaryLIR = newLIR1(cUnit, kPseudoDalvikByteCodeBoundary,
+ (intptr_t) instStr);
+ cUnit->boundaryMap.Put(mir->offset, boundaryLIR);
+ /* Don't generate the SSA annotation unless verbose mode is on */
+ if (cUnit->printMe && mir->ssaRep) {
+ char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
+ newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
+ }
}
MIR* specialIGet(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
OpSize size, bool longOrDouble, bool isObject)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
- false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
- }
- RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
- lockLiveArgs(cUnit, mir);
- rlObj = argLoc(cUnit, rlObj);
- RegLocation rlDest;
- if (longOrDouble) {
- rlDest = oatGetReturnWide(cUnit, false);
- } else {
- rlDest = oatGetReturn(cUnit, false);
- }
- // Point of no return - no aborts after this
- genPrintLabel(cUnit, mir);
- rlObj = loadArg(cUnit, rlObj);
- genIGet(cUnit, mir, size, rlDest, rlObj, longOrDouble, isObject);
- return getNextMir(cUnit, bb, mir);
+ int fieldOffset;
+ bool isVolatile;
+ uint32_t fieldIdx = mir->dalvikInsn.vC;
+ bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
+ if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ RegLocation rlObj = oatGetSrc(cUnit, mir, 0);
+ lockLiveArgs(cUnit, mir);
+ rlObj = argLoc(cUnit, rlObj);
+ RegLocation rlDest;
+ if (longOrDouble) {
+ rlDest = oatGetReturnWide(cUnit, false);
+ } else {
+ rlDest = oatGetReturn(cUnit, false);
+ }
+ // Point of no return - no aborts after this
+ genPrintLabel(cUnit, mir);
+ rlObj = loadArg(cUnit, rlObj);
+ genIGet(cUnit, mir, size, rlDest, rlObj, longOrDouble, isObject);
+ return getNextMir(cUnit, bb, mir);
}
MIR* specialIPut(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
OpSize size, bool longOrDouble, bool isObject)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile,
- false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
- }
- RegLocation rlSrc;
- RegLocation rlObj;
- lockLiveArgs(cUnit, mir);
- if (longOrDouble) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlObj = oatGetSrc(cUnit, mir, 2);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlObj = oatGetSrc(cUnit, mir, 1);
- }
- rlSrc = argLoc(cUnit, rlSrc);
- rlObj = argLoc(cUnit, rlObj);
- // Reject if source is split across registers & frame
- if (rlObj.location == kLocInvalid) {
- oatResetRegPool(cUnit);
- return NULL;
- }
- // Point of no return - no aborts after this
- genPrintLabel(cUnit, mir);
- rlObj = loadArg(cUnit, rlObj);
- rlSrc = loadArg(cUnit, rlSrc);
- genIPut(cUnit, mir, size, rlSrc, rlObj, longOrDouble, isObject);
- return getNextMir(cUnit, bb, mir);
+ int fieldOffset;
+ bool isVolatile;
+ uint32_t fieldIdx = mir->dalvikInsn.vC;
+ bool fastPath = fastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
+ if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ RegLocation rlSrc;
+ RegLocation rlObj;
+ lockLiveArgs(cUnit, mir);
+ if (longOrDouble) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlObj = oatGetSrc(cUnit, mir, 2);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlObj = oatGetSrc(cUnit, mir, 1);
+ }
+ rlSrc = argLoc(cUnit, rlSrc);
+ rlObj = argLoc(cUnit, rlObj);
+ // Reject if source is split across registers & frame
+ if (rlObj.location == kLocInvalid) {
+ oatResetRegPool(cUnit);
+ return NULL;
+ }
+ // Point of no return - no aborts after this
+ genPrintLabel(cUnit, mir);
+ rlObj = loadArg(cUnit, rlObj);
+ rlSrc = loadArg(cUnit, rlSrc);
+ genIPut(cUnit, mir, size, rlSrc, rlObj, longOrDouble, isObject);
+ return getNextMir(cUnit, bb, mir);
}
MIR* specialIdentity(CompilationUnit* cUnit, MIR* mir)
{
- RegLocation rlSrc;
- RegLocation rlDest;
- bool wide = (mir->ssaRep->numUses == 2);
- if (wide) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlDest = oatGetReturnWide(cUnit, false);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlDest = oatGetReturn(cUnit, false);
- }
- lockLiveArgs(cUnit, mir);
- rlSrc = argLoc(cUnit, rlSrc);
- if (rlSrc.location == kLocInvalid) {
- oatResetRegPool(cUnit);
- return NULL;
- }
- // Point of no return - no aborts after this
- genPrintLabel(cUnit, mir);
- rlSrc = loadArg(cUnit, rlSrc);
- if (wide) {
- storeValueWide(cUnit, rlDest, rlSrc);
- } else {
- storeValue(cUnit, rlDest, rlSrc);
- }
- return mir;
+ RegLocation rlSrc;
+ RegLocation rlDest;
+ bool wide = (mir->ssaRep->numUses == 2);
+ if (wide) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlDest = oatGetReturnWide(cUnit, false);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlDest = oatGetReturn(cUnit, false);
+ }
+ lockLiveArgs(cUnit, mir);
+ rlSrc = argLoc(cUnit, rlSrc);
+ if (rlSrc.location == kLocInvalid) {
+ oatResetRegPool(cUnit);
+ return NULL;
+ }
+ // Point of no return - no aborts after this
+ genPrintLabel(cUnit, mir);
+ rlSrc = loadArg(cUnit, rlSrc);
+ if (wide) {
+ storeValueWide(cUnit, rlDest, rlSrc);
+ } else {
+ storeValue(cUnit, rlDest, rlSrc);
+ }
+ return mir;
}
/*
* Special-case code genration for simple non-throwing leaf methods.
*/
void genSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
+ SpecialCaseHandler specialCase)
{
cUnit->currentDalvikOffset = mir->offset;
MIR* nextMir = NULL;
switch (specialCase) {
- case kNullMethod:
- DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
- nextMir = mir;
- break;
- case kConstFunction:
- genPrintLabel(cUnit, mir);
- loadConstant(cUnit, rRET0, mir->dalvikInsn.vB);
- nextMir = getNextMir(cUnit, &bb, mir);
- break;
- case kIGet:
- nextMir = specialIGet(cUnit, &bb, mir, kWord, false, false);
- break;
- case kIGetBoolean:
- case kIGetByte:
- nextMir = specialIGet(cUnit, &bb, mir, kUnsignedByte, false, false);
- break;
- case kIGetObject:
- nextMir = specialIGet(cUnit, &bb, mir, kWord, false, true);
- break;
- case kIGetChar:
- nextMir = specialIGet(cUnit, &bb, mir, kUnsignedHalf, false, false);
- break;
- case kIGetShort:
- nextMir = specialIGet(cUnit, &bb, mir, kSignedHalf, false, false);
- break;
- case kIGetWide:
- nextMir = specialIGet(cUnit, &bb, mir, kLong, true, false);
- break;
- case kIPut:
- nextMir = specialIPut(cUnit, &bb, mir, kWord, false, false);
- break;
- case kIPutBoolean:
- case kIPutByte:
- nextMir = specialIPut(cUnit, &bb, mir, kUnsignedByte, false, false);
- break;
- case kIPutObject:
- nextMir = specialIPut(cUnit, &bb, mir, kWord, false, true);
- break;
- case kIPutChar:
- nextMir = specialIPut(cUnit, &bb, mir, kUnsignedHalf, false, false);
- break;
- case kIPutShort:
- nextMir = specialIPut(cUnit, &bb, mir, kSignedHalf, false, false);
- break;
- case kIPutWide:
- nextMir = specialIPut(cUnit, &bb, mir, kLong, true, false);
- break;
- case kIdentity:
- nextMir = specialIdentity(cUnit, mir);
- break;
- default:
- return;
+ case kNullMethod:
+ DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
+ nextMir = mir;
+ break;
+ case kConstFunction:
+ genPrintLabel(cUnit, mir);
+ loadConstant(cUnit, rRET0, mir->dalvikInsn.vB);
+ nextMir = getNextMir(cUnit, &bb, mir);
+ break;
+ case kIGet:
+ nextMir = specialIGet(cUnit, &bb, mir, kWord, false, false);
+ break;
+ case kIGetBoolean:
+ case kIGetByte:
+ nextMir = specialIGet(cUnit, &bb, mir, kUnsignedByte, false, false);
+ break;
+ case kIGetObject:
+ nextMir = specialIGet(cUnit, &bb, mir, kWord, false, true);
+ break;
+ case kIGetChar:
+ nextMir = specialIGet(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ break;
+ case kIGetShort:
+ nextMir = specialIGet(cUnit, &bb, mir, kSignedHalf, false, false);
+ break;
+ case kIGetWide:
+ nextMir = specialIGet(cUnit, &bb, mir, kLong, true, false);
+ break;
+ case kIPut:
+ nextMir = specialIPut(cUnit, &bb, mir, kWord, false, false);
+ break;
+ case kIPutBoolean:
+ case kIPutByte:
+ nextMir = specialIPut(cUnit, &bb, mir, kUnsignedByte, false, false);
+ break;
+ case kIPutObject:
+ nextMir = specialIPut(cUnit, &bb, mir, kWord, false, true);
+ break;
+ case kIPutChar:
+ nextMir = specialIPut(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ break;
+ case kIPutShort:
+ nextMir = specialIPut(cUnit, &bb, mir, kSignedHalf, false, false);
+ break;
+ case kIPutWide:
+ nextMir = specialIPut(cUnit, &bb, mir, kLong, true, false);
+ break;
+ case kIdentity:
+ nextMir = specialIdentity(cUnit, mir);
+ break;
+ default:
+ return;
}
if (nextMir != NULL) {
- cUnit->currentDalvikOffset = nextMir->offset;
- if (specialCase != kIdentity) {
- genPrintLabel(cUnit, nextMir);
- }
- newLIR1(cUnit, kThumbBx, rLR);
- cUnit->coreSpillMask = 0;
- cUnit->numCoreSpills = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numFPSpills = 0;
- cUnit->frameSize = 0;
- cUnit->coreVmapTable.clear();
- cUnit->fpVmapTable.clear();
+ cUnit->currentDalvikOffset = nextMir->offset;
+ if (specialCase != kIdentity) {
+ genPrintLabel(cUnit, nextMir);
}
+ newLIR1(cUnit, kThumbBx, rLR);
+ cUnit->coreSpillMask = 0;
+ cUnit->numCoreSpills = 0;
+ cUnit->fpSpillMask = 0;
+ cUnit->numFPSpills = 0;
+ cUnit->frameSize = 0;
+ cUnit->coreVmapTable.clear();
+ cUnit->fpVmapTable.clear();
+ }
}
/*
@@ -323,30 +321,30 @@
*/
LIR* opIT(CompilationUnit* cUnit, ArmConditionCode code, const char* guide)
{
- int mask;
- int condBit = code & 1;
- int altBit = condBit ^ 1;
- int mask3 = 0;
- int mask2 = 0;
- int mask1 = 0;
+ int mask;
+ int condBit = code & 1;
+ int altBit = condBit ^ 1;
+ int mask3 = 0;
+ int mask2 = 0;
+ int mask1 = 0;
- //Note: case fallthroughs intentional
- switch (strlen(guide)) {
- case 3:
- mask1 = (guide[2] == 'T') ? condBit : altBit;
- case 2:
- mask2 = (guide[1] == 'T') ? condBit : altBit;
- case 1:
- mask3 = (guide[0] == 'T') ? condBit : altBit;
- break;
- case 0:
- break;
- default:
- LOG(FATAL) << "OAT: bad case in opIT";
- }
- mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
- (1 << (3 - strlen(guide)));
- return newLIR2(cUnit, kThumb2It, code, mask);
+ //Note: case fallthroughs intentional
+ switch (strlen(guide)) {
+ case 3:
+ mask1 = (guide[2] == 'T') ? condBit : altBit;
+ case 2:
+ mask2 = (guide[1] == 'T') ? condBit : altBit;
+ case 1:
+ mask3 = (guide[0] == 'T') ? condBit : altBit;
+ break;
+ case 0:
+ break;
+ default:
+ LOG(FATAL) << "OAT: bad case in opIT";
+ }
+ mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
+ (1 << (3 - strlen(guide)));
+ return newLIR2(cUnit, kThumb2It, code, mask);
}
/*
@@ -371,97 +369,95 @@
void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
LIR* labelList)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- if (cUnit->printMe) {
- dumpSparseSwitchTable(table);
- }
- // Add the table to the list - we'll process it later
- SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- int size = table[1];
- tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
- kAllocLIR);
- oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpSparseSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int size = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true, kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
- // Get the switch value
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- int rBase = oatAllocTemp(cUnit);
- /* Allocate key and disp temps */
- int rKey = oatAllocTemp(cUnit);
- int rDisp = oatAllocTemp(cUnit);
- // Make sure rKey's register number is less than rDisp's number for ldmia
- if (rKey > rDisp) {
- int tmp = rDisp;
- rDisp = rKey;
- rKey = tmp;
- }
- // Materialize a pointer to the switch table
- newLIR3(cUnit, kThumb2Adr, rBase, 0, (intptr_t)tabRec);
- // Set up rIdx
- int rIdx = oatAllocTemp(cUnit);
- loadConstant(cUnit, rIdx, size);
- // Establish loop branch target
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- // Load next key/disp
- newLIR2(cUnit, kThumb2LdmiaWB, rBase, (1 << rKey) | (1 << rDisp));
- opRegReg(cUnit, kOpCmp, rKey, rlSrc.lowReg);
- // Go if match. NOTE: No instruction set switch here - must stay Thumb2
- opIT(cUnit, kArmCondEq, "");
- LIR* switchBranch = newLIR1(cUnit, kThumb2AddPCR, rDisp);
- tabRec->anchor = switchBranch;
- // Needs to use setflags encoding here
- newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
- opCondBranch(cUnit, kCondNe, target);
+ // Get the switch value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ int rBase = oatAllocTemp(cUnit);
+ /* Allocate key and disp temps */
+ int rKey = oatAllocTemp(cUnit);
+ int rDisp = oatAllocTemp(cUnit);
+ // Make sure rKey's register number is less than rDisp's number for ldmia
+ if (rKey > rDisp) {
+ int tmp = rDisp;
+ rDisp = rKey;
+ rKey = tmp;
+ }
+ // Materialize a pointer to the switch table
+ newLIR3(cUnit, kThumb2Adr, rBase, 0, (intptr_t)tabRec);
+ // Set up rIdx
+ int rIdx = oatAllocTemp(cUnit);
+ loadConstant(cUnit, rIdx, size);
+ // Establish loop branch target
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ // Load next key/disp
+ newLIR2(cUnit, kThumb2LdmiaWB, rBase, (1 << rKey) | (1 << rDisp));
+ opRegReg(cUnit, kOpCmp, rKey, rlSrc.lowReg);
+ // Go if match. NOTE: No instruction set switch here - must stay Thumb2
+ opIT(cUnit, kArmCondEq, "");
+ LIR* switchBranch = newLIR1(cUnit, kThumb2AddPCR, rDisp);
+ tabRec->anchor = switchBranch;
+ // Needs to use setflags encoding here
+ newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
+ opCondBranch(cUnit, kCondNe, target);
}
void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- if (cUnit->printMe) {
- dumpPackedSwitchTable(table);
- }
- // Add the table to the list - we'll process it later
- SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- int size = table[1];
- tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
- kAllocLIR);
- oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int size = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true, kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
- // Get the switch value
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- int tableBase = oatAllocTemp(cUnit);
- // Materialize a pointer to the switch table
- newLIR3(cUnit, kThumb2Adr, tableBase, 0, (intptr_t)tabRec);
- int lowKey = s4FromSwitchData(&table[2]);
- int keyReg;
- // Remove the bias, if necessary
- if (lowKey == 0) {
- keyReg = rlSrc.lowReg;
- } else {
- keyReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
- }
- // Bounds check - if < 0 or >= size continue following switch
- opRegImm(cUnit, kOpCmp, keyReg, size-1);
- LIR* branchOver = opCondBranch(cUnit, kCondHi, NULL);
+ // Get the switch value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ int tableBase = oatAllocTemp(cUnit);
+ // Materialize a pointer to the switch table
+ newLIR3(cUnit, kThumb2Adr, tableBase, 0, (intptr_t)tabRec);
+ int lowKey = s4FromSwitchData(&table[2]);
+ int keyReg;
+ // Remove the bias, if necessary
+ if (lowKey == 0) {
+ keyReg = rlSrc.lowReg;
+ } else {
+ keyReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
+ }
+ // Bounds check - if < 0 or >= size continue following switch
+ opRegImm(cUnit, kOpCmp, keyReg, size-1);
+ LIR* branchOver = opCondBranch(cUnit, kCondHi, NULL);
- // Load the displacement from the switch table
- int dispReg = oatAllocTemp(cUnit);
- loadBaseIndexed(cUnit, tableBase, keyReg, dispReg, 2, kWord);
+ // Load the displacement from the switch table
+ int dispReg = oatAllocTemp(cUnit);
+ loadBaseIndexed(cUnit, tableBase, keyReg, dispReg, 2, kWord);
- // ..and go! NOTE: No instruction set switch here - must stay Thumb2
- LIR* switchBranch = newLIR1(cUnit, kThumb2AddPCR, dispReg);
- tabRec->anchor = switchBranch;
+ // ..and go! NOTE: No instruction set switch here - must stay Thumb2
+ LIR* switchBranch = newLIR1(cUnit, kThumb2AddPCR, dispReg);
+ tabRec->anchor = switchBranch;
- /* branchOver target here */
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
+ /* branchOver target here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
}
/*
@@ -476,46 +472,46 @@
*/
void genFillArrayData(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- // Add the table to the list - we'll process it later
- FillArrayData *tabRec = (FillArrayData *)
- oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- u2 width = tabRec->table[1];
- u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
- tabRec->size = (size * width) + 8;
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tabRec = (FillArrayData *)
+ oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ u2 width = tabRec->table[1];
+ u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
+ tabRec->size = (size * width) + 8;
- oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
+ oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
- // Making a call - use explicit registers
- oatFlushAllRegs(cUnit); /* Everything to home location */
- loadValueDirectFixed(cUnit, rlSrc, r0);
- loadWordDisp(cUnit, rSELF,
- ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rLR);
- // Materialize a pointer to the fill data image
- newLIR3(cUnit, kThumb2Adr, r1, 0, (intptr_t)tabRec);
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rLR);
+ // Making a call - use explicit registers
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ loadValueDirectFixed(cUnit, rlSrc, r0);
+ loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ rLR);
+ // Materialize a pointer to the fill data image
+ newLIR3(cUnit, kThumb2Adr, r1, 0, (intptr_t)tabRec);
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rLR);
}
void genNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- RegLocation rlResult;
- rlSrc = loadValue(cUnit, rlSrc, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, kThumb2Vnegs, rlResult.lowReg, rlSrc.lowReg);
- storeValue(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kFPReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, kThumb2Vnegs, rlResult.lowReg, rlSrc.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
}
void genNegDouble(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- RegLocation rlResult;
- rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, kThumb2Vnegd, S2D(rlResult.lowReg, rlResult.highReg),
- S2D(rlSrc.lowReg, rlSrc.highReg));
- storeValueWide(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, kThumb2Vnegd, S2D(rlResult.lowReg, rlResult.highReg),
+ S2D(rlSrc.lowReg, rlSrc.highReg));
+ storeValueWide(cUnit, rlDest, rlResult);
}
/*
@@ -546,31 +542,30 @@
*/
void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- DCHECK_EQ(LW_SHAPE_THIN, 0);
- loadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, r0, mir);
- loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- newLIR3(cUnit, kThumb2Ldrex, r1, r0,
- Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
- // Align owner
- opRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- newLIR4(cUnit, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
- newLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- opRegImm(cUnit, kOpCmp, r1, 0);
- opIT(cUnit, kArmCondEq, "");
- newLIR4(cUnit, kThumb2Strex, r1, r2, r0,
- Object::MonitorOffset().Int32Value() >> 2);
- opRegImm(cUnit, kOpCmp, r1, 0);
- opIT(cUnit, kArmCondNe, "T");
- // Go expensive route - artLockObjectFromCode(self, obj);
- loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pLockObjectFromCode),
- rLR);
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rLR);
- oatGenMemBarrier(cUnit, kSY);
+ oatFlushAllRegs(cUnit);
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
+ loadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, r0, mir);
+ loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ newLIR3(cUnit, kThumb2Ldrex, r1, r0,
+ Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
+ // Align owner
+ opRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ // Is lock unheld on lock or held by us (==threadId) on unlock?
+ newLIR4(cUnit, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+ newLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ opRegImm(cUnit, kOpCmp, r1, 0);
+ opIT(cUnit, kArmCondEq, "");
+ newLIR4(cUnit, kThumb2Strex, r1, r2, r0,
+ Object::MonitorOffset().Int32Value() >> 2);
+ opRegImm(cUnit, kOpCmp, r1, 0);
+ opIT(cUnit, kArmCondNe, "T");
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rLR);
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rLR);
+ oatGenMemBarrier(cUnit, kSY);
}
/*
@@ -581,27 +576,27 @@
*/
void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- DCHECK_EQ(LW_SHAPE_THIN, 0);
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, r0, mir);
- loadWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
- loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- opRegRegImm(cUnit, kOpAnd, r3, r1, (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
- // Align owner
- opRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- newLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- opRegReg(cUnit, kOpSub, r1, r2);
- opIT(cUnit, kArmCondEq, "EE");
- storeWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r3);
- // Go expensive route - UnlockObjectFromCode(obj);
- loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode),
- rLR);
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rLR);
- oatGenMemBarrier(cUnit, kSY);
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, r0, mir);
+ loadWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
+ loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ // Is lock unheld on lock or held by us (==threadId) on unlock?
+ opRegRegImm(cUnit, kOpAnd, r3, r1,
+ (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+ // Align owner
+ opRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ newLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ opRegReg(cUnit, kOpSub, r1, r2);
+ opIT(cUnit, kArmCondEq, "EE");
+ storeWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r3);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ loadWordDisp(cUnit, rSELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rLR);
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rLR);
+ oatGenMemBarrier(cUnit, kSY);
}
/*
@@ -620,83 +615,83 @@
* done:
*/
void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- LIR* target1;
- LIR* target2;
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- int tReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tReg, -1);
- opRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
- LIR* branch1 = opCondBranch(cUnit, kCondLt, NULL);
- LIR* branch2 = opCondBranch(cUnit, kCondGt, NULL);
- opRegRegReg(cUnit, kOpSub, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
- LIR* branch3 = opCondBranch(cUnit, kCondEq, NULL);
+ LIR* target1;
+ LIR* target2;
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, -1);
+ opRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
+ LIR* branch1 = opCondBranch(cUnit, kCondLt, NULL);
+ LIR* branch2 = opCondBranch(cUnit, kCondGt, NULL);
+ opRegRegReg(cUnit, kOpSub, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ LIR* branch3 = opCondBranch(cUnit, kCondEq, NULL);
- opIT(cUnit, kArmCondHi, "E");
- newLIR2(cUnit, kThumb2MovImmShift, tReg, modifiedImmediate(-1));
- loadConstant(cUnit, tReg, 1);
- genBarrier(cUnit);
+ opIT(cUnit, kArmCondHi, "E");
+ newLIR2(cUnit, kThumb2MovImmShift, tReg, modifiedImmediate(-1));
+ loadConstant(cUnit, tReg, 1);
+ genBarrier(cUnit);
- target2 = newLIR0(cUnit, kPseudoTargetLabel);
- opRegReg(cUnit, kOpNeg, tReg, tReg);
+ target2 = newLIR0(cUnit, kPseudoTargetLabel);
+ opRegReg(cUnit, kOpNeg, tReg, tReg);
- target1 = newLIR0(cUnit, kPseudoTargetLabel);
+ target1 = newLIR0(cUnit, kPseudoTargetLabel);
- RegLocation rlTemp = LOC_C_RETURN; // Just using as template, will change
- rlTemp.lowReg = tReg;
- storeValue(cUnit, rlDest, rlTemp);
- oatFreeTemp(cUnit, tReg);
+ RegLocation rlTemp = LOC_C_RETURN; // Just using as template, will change
+ rlTemp.lowReg = tReg;
+ storeValue(cUnit, rlDest, rlTemp);
+ oatFreeTemp(cUnit, tReg);
- branch1->target = (LIR*)target1;
- branch2->target = (LIR*)target2;
- branch3->target = branch1->target;
+ branch1->target = (LIR*)target1;
+ branch2->target = (LIR*)target2;
+ branch3->target = branch1->target;
}
void genFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
{
- LIR* labelList = (LIR*)cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- LIR* notTaken = &labelList[bb->fallThrough->id];
- RegLocation rlSrc1 = oatGetSrcWide(cUnit, mir, 0, 1);
- RegLocation rlSrc2 = oatGetSrcWide(cUnit, mir, 2, 3);
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- opRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
- switch(ccode) {
- case kCondEq:
- opCondBranch(cUnit, kCondNe, notTaken);
- break;
- case kCondNe:
- opCondBranch(cUnit, kCondNe, taken);
- break;
- case kCondLt:
- opCondBranch(cUnit, kCondLt, taken);
- opCondBranch(cUnit, kCondGt, notTaken);
- ccode = kCondCc;
- break;
- case kCondLe:
- opCondBranch(cUnit, kCondLt, taken);
- opCondBranch(cUnit, kCondGt, notTaken);
- ccode = kCondLs;
- break;
- case kCondGt:
- opCondBranch(cUnit, kCondGt, taken);
- opCondBranch(cUnit, kCondLt, notTaken);
- ccode = kCondHi;
- break;
- case kCondGe:
- opCondBranch(cUnit, kCondGt, taken);
- opCondBranch(cUnit, kCondLt, notTaken);
- ccode = kCondCs;
- break;
- default:
- LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
- }
- opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- opCondBranch(cUnit, ccode, taken);
+ LIR* labelList = (LIR*)cUnit->blockLabelList;
+ LIR* taken = &labelList[bb->taken->id];
+ LIR* notTaken = &labelList[bb->fallThrough->id];
+ RegLocation rlSrc1 = oatGetSrcWide(cUnit, mir, 0, 1);
+ RegLocation rlSrc2 = oatGetSrcWide(cUnit, mir, 2, 3);
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ opRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
+ switch(ccode) {
+ case kCondEq:
+ opCondBranch(cUnit, kCondNe, notTaken);
+ break;
+ case kCondNe:
+ opCondBranch(cUnit, kCondNe, taken);
+ break;
+ case kCondLt:
+ opCondBranch(cUnit, kCondLt, taken);
+ opCondBranch(cUnit, kCondGt, notTaken);
+ ccode = kCondCc;
+ break;
+ case kCondLe:
+ opCondBranch(cUnit, kCondLt, taken);
+ opCondBranch(cUnit, kCondGt, notTaken);
+ ccode = kCondLs;
+ break;
+ case kCondGt:
+ opCondBranch(cUnit, kCondGt, taken);
+ opCondBranch(cUnit, kCondLt, notTaken);
+ ccode = kCondHi;
+ break;
+ case kCondGe:
+ opCondBranch(cUnit, kCondGt, taken);
+ opCondBranch(cUnit, kCondLt, notTaken);
+ ccode = kCondCs;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
+ }
+ opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
+ opCondBranch(cUnit, ccode, taken);
}
/*
@@ -704,166 +699,165 @@
* is responsible for setting branch target field.
*/
LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+ int checkValue, LIR* target)
{
- LIR* branch;
- int modImm;
- ArmConditionCode armCond = oatArmConditionEncoding(cond);
- if ((LOWREG(reg)) && (checkValue == 0) &&
- ((armCond == kArmCondEq) || (armCond == kArmCondNe))) {
- branch = newLIR2(cUnit,
- (armCond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
- reg, 0);
+ LIR* branch;
+ int modImm;
+ ArmConditionCode armCond = oatArmConditionEncoding(cond);
+ if ((LOWREG(reg)) && (checkValue == 0) &&
+ ((armCond == kArmCondEq) || (armCond == kArmCondNe))) {
+ branch = newLIR2(cUnit, (armCond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+ reg, 0);
+ } else {
+ modImm = modifiedImmediate(checkValue);
+ if (LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
+ newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
+ } else if (modImm >= 0) {
+ newLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
} else {
- modImm = modifiedImmediate(checkValue);
- if (LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
- newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
- } else if (modImm >= 0) {
- newLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
- } else {
- int tReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tReg, checkValue);
- opRegReg(cUnit, kOpCmp, reg, tReg);
- }
- branch = newLIR2(cUnit, kThumbBCond, 0, armCond);
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, checkValue);
+ opRegReg(cUnit, kOpCmp, reg, tReg);
}
- branch->target = target;
- return branch;
+ branch = newLIR2(cUnit, kThumbBCond, 0, armCond);
+ }
+ branch->target = target;
+ return branch;
}
LIR* opRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
- LIR* res;
- ArmOpcode opcode;
- if (FPREG(rDest) || FPREG(rSrc))
- return fpRegCopy(cUnit, rDest, rSrc);
- if (LOWREG(rDest) && LOWREG(rSrc))
- opcode = kThumbMovRR;
- else if (!LOWREG(rDest) && !LOWREG(rSrc))
- opcode = kThumbMovRR_H2H;
- else if (LOWREG(rDest))
- opcode = kThumbMovRR_H2L;
- else
- opcode = kThumbMovRR_L2H;
- res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ LIR* res;
+ ArmOpcode opcode;
+ if (FPREG(rDest) || FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
+ if (LOWREG(rDest) && LOWREG(rSrc))
+ opcode = kThumbMovRR;
+ else if (!LOWREG(rDest) && !LOWREG(rSrc))
+ opcode = kThumbMovRR_H2H;
+ else if (LOWREG(rDest))
+ opcode = kThumbMovRR_H2L;
+ else
+ opcode = kThumbMovRR_L2H;
+ res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
LIR* opRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
- LIR* res = opRegCopyNoInsert(cUnit, rDest, rSrc);
- oatAppendLIR(cUnit, (LIR*)res);
- return res;
+ LIR* res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, (LIR*)res);
+ return res;
}
void opRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+ int srcLo, int srcHi)
{
- bool destFP = FPREG(destLo) && FPREG(destHi);
- bool srcFP = FPREG(srcLo) && FPREG(srcHi);
- DCHECK_EQ(FPREG(srcLo), FPREG(srcHi));
- DCHECK_EQ(FPREG(destLo), FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
- } else {
- newLIR3(cUnit, kThumb2Fmdrr, S2D(destLo, destHi), srcLo, srcHi);
- }
+ bool destFP = FPREG(destLo) && FPREG(destHi);
+ bool srcFP = FPREG(srcLo) && FPREG(srcHi);
+ DCHECK_EQ(FPREG(srcLo), FPREG(srcHi));
+ DCHECK_EQ(FPREG(destLo), FPREG(destHi));
+ if (destFP) {
+ if (srcFP) {
+ opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
} else {
- if (srcFP) {
- newLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2D(srcLo, srcHi));
- } else {
- // Handle overlap
- if (srcHi == destLo) {
- opRegCopy(cUnit, destHi, srcHi);
- opRegCopy(cUnit, destLo, srcLo);
- } else {
- opRegCopy(cUnit, destLo, srcLo);
- opRegCopy(cUnit, destHi, srcHi);
- }
- }
+ newLIR3(cUnit, kThumb2Fmdrr, S2D(destLo, destHi), srcLo, srcHi);
}
+ } else {
+ if (srcFP) {
+ newLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2D(srcLo, srcHi));
+ } else {
+ // Handle overlap
+ if (srcHi == destLo) {
+ opRegCopy(cUnit, destHi, srcHi);
+ opRegCopy(cUnit, destLo, srcLo);
+ } else {
+ opRegCopy(cUnit, destLo, srcLo);
+ opRegCopy(cUnit, destHi, srcHi);
+ }
+ }
+ }
}
// Table of magic divisors
enum DividePattern {
- DivideNone,
- Divide3,
- Divide5,
- Divide7,
+ DivideNone,
+ Divide3,
+ Divide5,
+ Divide7,
};
struct MagicTable {
- uint32_t magic;
- uint32_t shift;
- DividePattern pattern;
+ uint32_t magic;
+ uint32_t shift;
+ DividePattern pattern;
};
static const MagicTable magicTable[] = {
- {0, 0, DivideNone}, // 0
- {0, 0, DivideNone}, // 1
- {0, 0, DivideNone}, // 2
- {0x55555556, 0, Divide3}, // 3
- {0, 0, DivideNone}, // 4
- {0x66666667, 1, Divide5}, // 5
- {0x2AAAAAAB, 0, Divide3}, // 6
- {0x92492493, 2, Divide7}, // 7
- {0, 0, DivideNone}, // 8
- {0x38E38E39, 1, Divide5}, // 9
- {0x66666667, 2, Divide5}, // 10
- {0x2E8BA2E9, 1, Divide5}, // 11
- {0x2AAAAAAB, 1, Divide5}, // 12
- {0x4EC4EC4F, 2, Divide5}, // 13
- {0x92492493, 3, Divide7}, // 14
- {0x88888889, 3, Divide7}, // 15
+ {0, 0, DivideNone}, // 0
+ {0, 0, DivideNone}, // 1
+ {0, 0, DivideNone}, // 2
+ {0x55555556, 0, Divide3}, // 3
+ {0, 0, DivideNone}, // 4
+ {0x66666667, 1, Divide5}, // 5
+ {0x2AAAAAAB, 0, Divide3}, // 6
+ {0x92492493, 2, Divide7}, // 7
+ {0, 0, DivideNone}, // 8
+ {0x38E38E39, 1, Divide5}, // 9
+ {0x66666667, 2, Divide5}, // 10
+ {0x2E8BA2E9, 1, Divide5}, // 11
+ {0x2AAAAAAB, 1, Divide5}, // 12
+ {0x4EC4EC4F, 2, Divide5}, // 13
+ {0x92492493, 3, Divide7}, // 14
+ {0x88888889, 3, Divide7}, // 15
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool smallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
RegLocation rlSrc, RegLocation rlDest, int lit)
{
- if ((lit < 0) || (lit >= (int)(sizeof(magicTable)/sizeof(magicTable[0])))) {
- return false;
- }
- DividePattern pattern = magicTable[lit].pattern;
- if (pattern == DivideNone) {
- return false;
- }
- // Tuning: add rem patterns
- if (dalvikOpcode != Instruction::DIV_INT_LIT8) {
- return false;
- }
+ if ((lit < 0) || (lit >= (int)(sizeof(magicTable)/sizeof(magicTable[0])))) {
+ return false;
+ }
+ DividePattern pattern = magicTable[lit].pattern;
+ if (pattern == DivideNone) {
+ return false;
+ }
+ // Tuning: add rem patterns
+ if (dalvikOpcode != Instruction::DIV_INT_LIT8) {
+ return false;
+ }
- int rMagic = oatAllocTemp(cUnit);
- loadConstant(cUnit, rMagic, magicTable[lit].magic);
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- int rHi = oatAllocTemp(cUnit);
- int rLo = oatAllocTemp(cUnit);
- newLIR4(cUnit, kThumb2Smull, rLo, rHi, rMagic, rlSrc.lowReg);
- switch(pattern) {
- case Divide3:
- opRegRegRegShift(cUnit, kOpSub, rlResult.lowReg, rHi,
- rlSrc.lowReg, encodeShift(kArmAsr, 31));
- break;
- case Divide5:
- opRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- opRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- encodeShift(kArmAsr, magicTable[lit].shift));
- break;
- case Divide7:
- opRegReg(cUnit, kOpAdd, rHi, rlSrc.lowReg);
- opRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- opRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- encodeShift(kArmAsr, magicTable[lit].shift));
- break;
- default:
- LOG(FATAL) << "Unexpected pattern: " << (int)pattern;
- }
- storeValue(cUnit, rlDest, rlResult);
- return true;
+ int rMagic = oatAllocTemp(cUnit);
+ loadConstant(cUnit, rMagic, magicTable[lit].magic);
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ int rHi = oatAllocTemp(cUnit);
+ int rLo = oatAllocTemp(cUnit);
+ newLIR4(cUnit, kThumb2Smull, rLo, rHi, rMagic, rlSrc.lowReg);
+ switch(pattern) {
+ case Divide3:
+ opRegRegRegShift(cUnit, kOpSub, rlResult.lowReg, rHi,
+ rlSrc.lowReg, encodeShift(kArmAsr, 31));
+ break;
+ case Divide5:
+ opRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
+ opRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
+ encodeShift(kArmAsr, magicTable[lit].shift));
+ break;
+ case Divide7:
+ opRegReg(cUnit, kOpAdd, rHi, rlSrc.lowReg);
+ opRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
+ opRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
+ encodeShift(kArmAsr, magicTable[lit].shift));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected pattern: " << (int)pattern;
+ }
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
}
} // namespace art
diff --git a/src/compiler/codegen/arm/Thumb2/Ralloc.cc b/src/compiler/codegen/arm/Thumb2/Ralloc.cc
index 7858318..98a110c 100644
--- a/src/compiler/codegen/arm/Thumb2/Ralloc.cc
+++ b/src/compiler/codegen/arm/Thumb2/Ralloc.cc
@@ -30,96 +30,96 @@
*/
int oatAllocTypedTempPair(CompilationUnit* cUnit, bool fpHint, int regClass)
{
- int highReg;
- int lowReg;
- int res = 0;
+ int highReg;
+ int lowReg;
+ int res = 0;
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = oatAllocTempDouble(cUnit);
- highReg = lowReg + 1;
- } else {
- lowReg = oatAllocTemp(cUnit);
- highReg = oatAllocTemp(cUnit);
- }
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
- return res;
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+ lowReg = oatAllocTempDouble(cUnit);
+ highReg = lowReg + 1;
+ } else {
+ lowReg = oatAllocTemp(cUnit);
+ highReg = oatAllocTemp(cUnit);
+ }
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
}
int oatAllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass)
{
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
- return oatAllocTempFloat(cUnit);
- return oatAllocTemp(cUnit);
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
+ return oatAllocTempFloat(cUnit);
+ return oatAllocTemp(cUnit);
}
void oatInitializeRegAlloc(CompilationUnit* cUnit)
{
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
- int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
- RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
- kAllocRegAlloc);
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = (RegisterInfo *)
- oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
- true, kAllocRegAlloc);
- pool->numFPRegs = numFPRegs;
- pool->FPRegs = (RegisterInfo *)
- oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
- kAllocRegAlloc);
- oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
- // Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
- if (NO_SUSPEND && !cUnit->genDebugger &&
- (reservedRegs[i] == rSUSPEND)) {
- //To measure cost of suspend check
- continue;
- }
- oatMarkInUse(cUnit, reservedRegs[i]);
+ int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
+ int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
+ int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
+ int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
+ int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
+ kAllocRegAlloc);
+ cUnit->regPool = pool;
+ pool->numCoreRegs = numRegs;
+ pool->coreRegs = (RegisterInfo *)
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
+ true, kAllocRegAlloc);
+ pool->numFPRegs = numFPRegs;
+ pool->FPRegs = (RegisterInfo *)
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
+ oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
+ oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < numReserved; i++) {
+ if (NO_SUSPEND && !cUnit->genDebugger &&
+ (reservedRegs[i] == rSUSPEND)) {
+ //To measure cost of suspend check
+ continue;
}
- // Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- oatMarkTemp(cUnit, coreTemps[i]);
- }
- for (int i = 0; i < numFPTemps; i++) {
- oatMarkTemp(cUnit, fpTemps[i]);
- }
+ oatMarkInUse(cUnit, reservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < numTemps; i++) {
+ oatMarkTemp(cUnit, coreTemps[i]);
+ }
+ for (int i = 0; i < numFPTemps; i++) {
+ oatMarkTemp(cUnit, fpTemps[i]);
+ }
- // Start allocation at r2 in an attempt to avoid clobbering return values
- pool->nextCoreReg = r2;
+ // Start allocation at r2 in an attempt to avoid clobbering return values
+ pool->nextCoreReg = r2;
- // Construct the alias map.
- cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
- sizeof(cUnit->phiAliasMap[0]), false,
- kAllocDFInfo);
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
+ // Construct the alias map.
+ cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
+ sizeof(cUnit->phiAliasMap[0]), false,
+ kAllocDFInfo);
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ cUnit->phiAliasMap[i] = i;
+ }
+ for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
+ int defReg = phi->ssaRep->defs[0];
+ for (int i = 0; i < phi->ssaRep->numUses; i++) {
+ for (int j = 0; j < cUnit->numSSARegs; j++) {
+ if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
+ cUnit->phiAliasMap[j] = defReg;
+ }
+ }
}
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
- }
- }
- }
- }
+ }
}
void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
RegLocation rlFree)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
- // No overlap, free both
- oatFreeTemp(cUnit, rlFree.lowReg);
- oatFreeTemp(cUnit, rlFree.highReg);
- }
+ if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
+ (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ // No overlap, free both
+ oatFreeTemp(cUnit, rlFree.lowReg);
+ oatFreeTemp(cUnit, rlFree.highReg);
+ }
}
diff --git a/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc b/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
index dcf3a99..0512896 100644
--- a/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
+++ b/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
@@ -22,33 +22,33 @@
*/
InstructionSet oatInstructionSet()
{
- return kThumb2;
+ return kThumb2;
}
/* Architecture-specific initializations and checks go here */
bool oatArchVariantInit(void)
{
- return true;
+ return true;
}
int oatTargetOptHint(int key)
{
- int res = 0;
- switch (key) {
- case kMaxHoistDistance:
- res = 7;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
- }
- return res;
+ int res = 0;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 7;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
}
void oatGenMemBarrier(CompilationUnit* cUnit, int barrierKind)
{
#if ANDROID_SMP != 0
- LIR* dmb = newLIR1(cUnit, kThumb2Dmb, barrierKind);
- dmb->defMask = ENCODE_ALL;
+ LIR* dmb = newLIR1(cUnit, kThumb2Dmb, barrierKind);
+ dmb->defMask = ENCODE_ALL;
#endif
}
diff --git a/src/compiler/codegen/arm/armv7-a/ArchVariant.cc b/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
index dcf3a99..3977d50 100644
--- a/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
+++ b/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
@@ -22,33 +22,33 @@
*/
InstructionSet oatInstructionSet()
{
- return kThumb2;
+ return kThumb2;
}
/* Architecture-specific initializations and checks go here */
bool oatArchVariantInit(void)
{
- return true;
+ return true;
}
int oatTargetOptHint(int key)
{
- int res = 0;
- switch (key) {
- case kMaxHoistDistance:
- res = 7;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ int res = 0;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 7;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
}
- return res;
+ return res;
}
void oatGenMemBarrier(CompilationUnit* cUnit, int barrierKind)
{
#if ANDROID_SMP != 0
- LIR* dmb = newLIR1(cUnit, kThumb2Dmb, barrierKind);
- dmb->defMask = ENCODE_ALL;
+ LIR* dmb = newLIR1(cUnit, kThumb2Dmb, barrierKind);
+ dmb->defMask = ENCODE_ALL;
#endif
}
diff --git a/src/compiler/codegen/mips/ArchFactory.cc b/src/compiler/codegen/mips/ArchFactory.cc
index 5e41400..8317215 100644
--- a/src/compiler/codegen/mips/ArchFactory.cc
+++ b/src/compiler/codegen/mips/ArchFactory.cc
@@ -29,72 +29,72 @@
bool genAddLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- /*
- * [v1 v0] = [a1 a0] + [a3 a2];
- * addu v0,a2,a0
- * addu t1,a3,a1
- * sltu v1,v0,a2
- * addu v1,v1,t1
- */
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu t1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,t1
+ */
- opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc2.lowReg, rlSrc1.lowReg);
- int tReg = oatAllocTemp(cUnit);
- opRegRegReg(cUnit, kOpAdd, tReg, rlSrc2.highReg, rlSrc1.highReg);
- newLIR3(cUnit, kMipsSltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
- opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
- oatFreeTemp(cUnit, tReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc2.lowReg, rlSrc1.lowReg);
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegReg(cUnit, kOpAdd, tReg, rlSrc2.highReg, rlSrc1.highReg);
+ newLIR3(cUnit, kMipsSltu, rlResult.highReg, rlResult.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
bool genSubLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- /*
- * [v1 v0] = [a1 a0] - [a3 a2];
- * subu v0,a0,a2
- * subu v1,a1,a3
- * sltu t1,a0,v0
- * subu v1,v1,t1
- */
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] - [a3 a2];
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu t1,a0,v0
+ * subu v1,v1,t1
+ */
- opRegRegReg(cUnit, kOpSub, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
- int tReg = oatAllocTemp(cUnit);
- newLIR3(cUnit, kMipsSltu, tReg, rlSrc1.lowReg, rlResult.lowReg);
- opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
- oatFreeTemp(cUnit, tReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ opRegRegReg(cUnit, kOpSub, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlSrc1.highReg, rlSrc2.highReg);
+ int tReg = oatAllocTemp(cUnit);
+ newLIR3(cUnit, kMipsSltu, tReg, rlSrc1.lowReg, rlResult.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
bool genNegLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc)
{
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- /*
- * [v1 v0] = -[a1 a0]
- * negu v0,a0
- * negu v1,a1
- * sltu t1,r_zero
- * subu v1,v1,t1
- */
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ /*
+ * [v1 v0] = -[a1 a0]
+ * negu v0,a0
+ * negu v1,a1
+ * sltu t1,r_zero
+ * subu v1,v1,t1
+ */
- opRegReg(cUnit, kOpNeg, rlResult.lowReg, rlSrc.lowReg);
- opRegReg(cUnit, kOpNeg, rlResult.highReg, rlSrc.highReg);
- int tReg = oatAllocTemp(cUnit);
- newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
- opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
- oatFreeTemp(cUnit, tReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ opRegReg(cUnit, kOpNeg, rlResult.lowReg, rlSrc.lowReg);
+ opRegReg(cUnit, kOpNeg, rlResult.highReg, rlSrc.highReg);
+ int tReg = oatAllocTemp(cUnit);
+ newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
+ opRegRegReg(cUnit, kOpSub, rlResult.highReg, rlResult.highReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
}
void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);
@@ -107,118 +107,113 @@
*/
int loadHelper(CompilationUnit* cUnit, int offset)
{
- int tReg = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rSELF, offset, tReg);
- return tReg;
+ int tReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSELF, offset, tReg);
+ return tReg;
}
void spillCoreRegs(CompilationUnit* cUnit)
{
- if (cUnit->numCoreSpills == 0) {
- return;
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ uint32_t mask = cUnit->coreSpillMask;
+ int offset = cUnit->numCoreSpills * 4;
+ opRegImm(cUnit, kOpSub, rSP, offset);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ storeWordDisp(cUnit, rSP, offset, reg);
}
- uint32_t mask = cUnit->coreSpillMask;
- int offset = cUnit->numCoreSpills * 4;
- opRegImm(cUnit, kOpSub, rSP, offset);
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- offset -= 4;
- storeWordDisp(cUnit, rSP, offset, reg);
- }
- }
+ }
}
void unSpillCoreRegs(CompilationUnit* cUnit)
{
- if (cUnit->numCoreSpills == 0) {
- return;
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ uint32_t mask = cUnit->coreSpillMask;
+ int offset = cUnit->frameSize;
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ loadWordDisp(cUnit, rSP, offset, reg);
}
- uint32_t mask = cUnit->coreSpillMask;
- int offset = cUnit->frameSize;
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- offset -= 4;
- loadWordDisp(cUnit, rSP, offset, reg);
- }
- }
- opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
+ }
+ opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
}
void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
- /*
- * On entry, rARG0, rARG1, rARG2 & rARG3 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with a single temp: r12. This should be enough.
- */
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
- oatLockTemp(cUnit, rARG3);
+ int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ /*
+ * On entry, rARG0, rARG1, rARG2 & rARG3 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with a single temp: r12. This should be enough.
+ */
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- ((size_t)cUnit->frameSize <
- Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kPseudoMethodEntry);
- int checkReg = oatAllocTemp(cUnit);
- int newSP = oatAllocTemp(cUnit);
- if (!skipOverflowCheck) {
- /* Load stack limit */
- loadWordDisp(cUnit, rSELF,
- Thread::StackEndOffset().Int32Value(), checkReg);
- }
- /* Spill core callee saves */
- spillCoreRegs(cUnit);
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
- opRegRegImm(cUnit, kOpSub, newSP, rSP,
- cUnit->frameSize - (spillCount * 4));
- genRegRegCheck(cUnit, kCondCc, newSP, checkReg, NULL,
- kThrowStackOverflow);
- opRegCopy(cUnit, rSP, newSP); // Establish stack
- } else {
- opRegImm(cUnit, kOpSub, rSP,
- cUnit->frameSize - (spillCount * 4));
- }
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize < Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ int checkReg = oatAllocTemp(cUnit);
+ int newSP = oatAllocTemp(cUnit);
+ if (!skipOverflowCheck) {
+ /* Load stack limit */
+ loadWordDisp(cUnit, rSELF, Thread::StackEndOffset().Int32Value(), checkReg);
+ }
+ /* Spill core callee saves */
+ spillCoreRegs(cUnit);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cUnit->numFPSpills, 0);
+ if (!skipOverflowCheck) {
+ opRegRegImm(cUnit, kOpSub, newSP, rSP, cUnit->frameSize - (spillCount * 4));
+ genRegRegCheck(cUnit, kCondCc, newSP, checkReg, NULL, kThrowStackOverflow);
+ opRegCopy(cUnit, rSP, newSP); // Establish stack
+ } else {
+ opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (spillCount * 4));
+ }
- flushIns(cUnit);
+ flushIns(cUnit);
- if (cUnit->genDebugger) {
- // Refresh update debugger callout
- loadWordDisp(cUnit, rSELF,
- ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode), rSUSPEND);
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
- }
+ if (cUnit->genDebugger) {
+ // Refresh update debugger callout
+ loadWordDisp(cUnit, rSELF,
+ ENTRYPOINT_OFFSET(pUpdateDebuggerFromCode), rSUSPEND);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
+ }
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
- oatFreeTemp(cUnit, rARG3);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
}
void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- /*
- * In the exit path, rRET0/rRET1 are live - make sure they aren't
- * allocated by the register utilities as temps.
- */
- oatLockTemp(cUnit, rRET0);
- oatLockTemp(cUnit, rRET1);
+ /*
+ * In the exit path, rRET0/rRET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ oatLockTemp(cUnit, rRET0);
+ oatLockTemp(cUnit, rRET1);
- newLIR0(cUnit, kPseudoMethodExit);
- /* If we're compiling for the debugger, generate an update callout */
- if (cUnit->genDebugger) {
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
- }
- unSpillCoreRegs(cUnit);
- opReg(cUnit, kOpBx, r_RA);
+ newLIR0(cUnit, kPseudoMethodExit);
+ /* If we're compiling for the debugger, generate an update callout */
+ if (cUnit->genDebugger) {
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
+ }
+ unSpillCoreRegs(cUnit);
+ opReg(cUnit, kOpBx, r_RA);
}
/*
@@ -228,55 +223,55 @@
*/
void removeRedundantBranches(CompilationUnit* cUnit)
{
- LIR* thisLIR;
+ LIR* thisLIR;
- for (thisLIR = (LIR*) cUnit->firstLIRInsn;
- thisLIR != (LIR*) cUnit->lastLIRInsn;
- thisLIR = NEXT_LIR(thisLIR)) {
+ for (thisLIR = (LIR*) cUnit->firstLIRInsn;
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
- /* Branch to the next instruction */
- if (thisLIR->opcode == kMipsB) {
- LIR* nextLIR = thisLIR;
+ /* Branch to the next instruction */
+ if (thisLIR->opcode == kMipsB) {
+ LIR* nextLIR = thisLIR;
- while (true) {
- nextLIR = NEXT_LIR(nextLIR);
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
- /*
- * Is the branch target the next instruction?
- */
- if (nextLIR == (LIR*) thisLIR->target) {
- thisLIR->flags.isNop = true;
- break;
- }
-
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
- * might be the last real instruction.
- */
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (LIR*) cUnit->lastLIRInsn))
- break;
- }
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
}
+
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR = (LIR*) cUnit->lastLIRInsn))
+ break;
+ }
}
+ }
}
/* Common initialization routine for an architecture family */
bool oatArchInit()
{
- int i;
+ int i;
- for (i = 0; i < kMipsLast; i++) {
- if (EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
- " is wrong: expecting " << i << ", seeing " <<
- (int)EncodingMap[i].opcode;
- }
+ for (i = 0; i < kMipsLast; i++) {
+ if (EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
+ " is wrong: expecting " << i << ", seeing " <<
+ (int)EncodingMap[i].opcode;
}
+ }
- return oatArchVariantInit();
+ return oatArchVariantInit();
}
} // namespace art
diff --git a/src/compiler/codegen/mips/ArchUtility.cc b/src/compiler/codegen/mips/ArchUtility.cc
index 4252d50..f1afa78 100644
--- a/src/compiler/codegen/mips/ArchUtility.cc
+++ b/src/compiler/codegen/mips/ArchUtility.cc
@@ -25,10 +25,10 @@
/* For dumping instructions */
#define MIPS_REG_COUNT 32
static const char *mipsRegName[MIPS_REG_COUNT] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
};
/*
@@ -37,145 +37,141 @@
*/
std::string buildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr)
{
- std::string buf;
- int i;
- const char *fmtEnd = &fmt[strlen(fmt)];
- char tbuf[256];
- char nc;
- while (fmt < fmtEnd) {
- int operand;
- if (*fmt == '!') {
- fmt++;
- DCHECK_LT(fmt, fmtEnd);
- nc = *fmt++;
- if (nc=='!') {
- strcpy(tbuf, "!");
- } else {
- DCHECK_LT(fmt, fmtEnd);
- DCHECK_LT((unsigned)(nc-'0'), 4u);
- operand = lir->operands[nc-'0'];
- switch (*fmt++) {
- case 'b':
- strcpy(tbuf,"0000");
- for (i=3; i>= 0; i--) {
- tbuf[i] += operand & 1;
- operand >>= 1;
- }
- break;
- case 's':
- sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
- break;
- case 'S':
- DCHECK_EQ(((operand & FP_REG_MASK) & 1), 0);
- sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
- break;
- case 'h':
- sprintf(tbuf,"%04x", operand);
- break;
- case 'M':
- case 'd':
- sprintf(tbuf,"%d", operand);
- break;
- case 'D':
- sprintf(tbuf,"%d", operand+1);
- break;
- case 'E':
- sprintf(tbuf,"%d", operand*4);
- break;
- case 'F':
- sprintf(tbuf,"%d", operand*2);
- break;
- case 't':
- sprintf(tbuf,"0x%08x (L%p)",
- (int) baseAddr + lir->offset + 4 +
- (operand << 2),
- lir->target);
- break;
- case 'T':
- sprintf(tbuf,"0x%08x",
- (int) (operand << 2));
- break;
- case 'u': {
- int offset_1 = lir->operands[0];
- int offset_2 = NEXT_LIR(lir)->operands[0];
- intptr_t target =
- ((((intptr_t) baseAddr + lir->offset + 4) &
- ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
- 0xfffffffc;
- sprintf(tbuf, "%p", (void *) target);
- break;
- }
+ std::string buf;
+ int i;
+ const char *fmtEnd = &fmt[strlen(fmt)];
+ char tbuf[256];
+ char nc;
+ while (fmt < fmtEnd) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmtEnd);
+ nc = *fmt++;
+ if (nc=='!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT((unsigned)(nc-'0'), 4u);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'b':
+ strcpy(tbuf,"0000");
+ for (i=3; i>= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 's':
+ sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+ break;
+ case 'S':
+ DCHECK_EQ(((operand & FP_REG_MASK) & 1), 0);
+ sprintf(tbuf,"$f%d",operand & FP_REG_MASK);
+ break;
+ case 'h':
+ sprintf(tbuf,"%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ sprintf(tbuf,"%d", operand);
+ break;
+ case 'D':
+ sprintf(tbuf,"%d", operand+1);
+ break;
+ case 'E':
+ sprintf(tbuf,"%d", operand*4);
+ break;
+ case 'F':
+ sprintf(tbuf,"%d", operand*2);
+ break;
+ case 't':
+ sprintf(tbuf,"0x%08x (L%p)", (int) baseAddr + lir->offset + 4 +
+ (operand << 2), lir->target);
+ break;
+ case 'T':
+ sprintf(tbuf,"0x%08x", (int) (operand << 2));
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ intptr_t target =
+ ((((intptr_t) baseAddr + lir->offset + 4) & ~3) +
+ (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+ sprintf(tbuf, "%p", (void *) target);
+ break;
+ }
- /* Nothing to print for BLX_2 */
- case 'v':
- strcpy(tbuf, "see above");
- break;
- case 'r':
- DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
- strcpy(tbuf, mipsRegName[operand]);
- break;
- case 'N':
- // Placeholder for delay slot handling
- strcpy(tbuf, "; nop");
- break;
- default:
- strcpy(tbuf,"DecodeError");
- break;
- }
- buf += tbuf;
- }
- } else {
- buf += *fmt++;
- }
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'r':
+ DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
+ strcpy(tbuf, mipsRegName[operand]);
+ break;
+ case 'N':
+ // Placeholder for delay slot handling
+ strcpy(tbuf, "; nop");
+ break;
+ default:
+ strcpy(tbuf,"DecodeError");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
}
- return buf;
+ }
+ return buf;
}
// FIXME: need to redo resourse maps for MIPS - fix this at that time
void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
{
- char buf[256];
- buf[0] = 0;
- LIR *mipsLIR = (LIR *) lir;
+ char buf[256];
+ buf[0] = 0;
+ LIR *mipsLIR = (LIR *) lir;
- if (mask == ENCODE_ALL) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
- for (i = 0; i < kRegEnd; i++) {
- if (mask & (1ULL << i)) {
- sprintf(num, "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask & ENCODE_CCODE) {
- strcat(buf, "cc ");
- }
- if (mask & ENCODE_FP_STATUS) {
- strcat(buf, "fpcc ");
- }
- /* Memory bits */
- if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
- (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
- }
- if (mask & ENCODE_LITERAL) {
- strcat(buf, "lit ");
- }
-
- if (mask & ENCODE_HEAP_REF) {
- strcat(buf, "heap ");
- }
- if (mask & ENCODE_MUST_NOT_ALIAS) {
- strcat(buf, "noalias ");
- }
+ for (i = 0; i < kRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
}
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
}
+ if (mask & ENCODE_FP_STATUS) {
+ strcat(buf, "fpcc ");
+ }
+ /* Memory bits */
+ if (mipsLIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", mipsLIR->aliasInfo & 0xffff,
+ (mipsLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
}
} // namespace art
diff --git a/src/compiler/codegen/mips/Assemble.cc b/src/compiler/codegen/mips/Assemble.cc
index 1d629be..c19effe 100644
--- a/src/compiler/codegen/mips/Assemble.cc
+++ b/src/compiler/codegen/mips/Assemble.cc
@@ -463,52 +463,52 @@
*/
void convertShortToLongBranch(CompilationUnit* cUnit, LIR* lir)
{
- // For conditional branches we'll need to reverse the sense
- bool unconditional = false;
- int opcode = lir->opcode;
- int dalvikOffset = lir->dalvikOffset;
- switch (opcode) {
- case kMipsBal:
- LOG(FATAL) << "long branch and link unsupported";
- case kMipsB:
- unconditional = true;
- break;
- case kMipsBeq: opcode = kMipsBne; break;
- case kMipsBne: opcode = kMipsBeq; break;
- case kMipsBeqz: opcode = kMipsBnez; break;
- case kMipsBgez: opcode = kMipsBltz; break;
- case kMipsBgtz: opcode = kMipsBlez; break;
- case kMipsBlez: opcode = kMipsBgtz; break;
- case kMipsBltz: opcode = kMipsBgez; break;
- case kMipsBnez: opcode = kMipsBeqz; break;
- default:
- LOG(FATAL) << "Unexpected branch kind " << (int)opcode;
- }
- LIR* hopTarget = NULL;
- if (!unconditional) {
- hopTarget = rawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
- LIR* hopBranch = rawLIR(cUnit, dalvikOffset, opcode, lir->operands[0],
- lir->operands[1], 0, 0, 0, hopTarget);
- oatInsertLIRBefore(lir, hopBranch);
- }
- LIR* currPC = rawLIR(cUnit, dalvikOffset, kMipsCurrPC);
- oatInsertLIRBefore(lir, currPC);
- LIR* anchor = rawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
- LIR* deltaHi = rawLIR(cUnit, dalvikOffset, kMipsDeltaHi, r_AT, 0,
- (uintptr_t)anchor, 0, 0, lir->target);
- oatInsertLIRBefore(lir, deltaHi);
- oatInsertLIRBefore(lir, anchor);
- LIR* deltaLo = rawLIR(cUnit, dalvikOffset, kMipsDeltaLo, r_AT, 0,
- (uintptr_t)anchor, 0, 0, lir->target);
- oatInsertLIRBefore(lir, deltaLo);
- LIR* addu = rawLIR(cUnit, dalvikOffset, kMipsAddu, r_AT, r_AT, r_RA);
- oatInsertLIRBefore(lir, addu);
- LIR* jr = rawLIR(cUnit, dalvikOffset, kMipsJr, r_AT);
- oatInsertLIRBefore(lir, jr);
- if (!unconditional) {
- oatInsertLIRBefore(lir, hopTarget);
- }
- lir->flags.isNop = true;
+ // For conditional branches we'll need to reverse the sense
+ bool unconditional = false;
+ int opcode = lir->opcode;
+ int dalvikOffset = lir->dalvikOffset;
+ switch (opcode) {
+ case kMipsBal:
+ LOG(FATAL) << "long branch and link unsupported";
+ case kMipsB:
+ unconditional = true;
+ break;
+ case kMipsBeq: opcode = kMipsBne; break;
+ case kMipsBne: opcode = kMipsBeq; break;
+ case kMipsBeqz: opcode = kMipsBnez; break;
+ case kMipsBgez: opcode = kMipsBltz; break;
+ case kMipsBgtz: opcode = kMipsBlez; break;
+ case kMipsBlez: opcode = kMipsBgtz; break;
+ case kMipsBltz: opcode = kMipsBgez; break;
+ case kMipsBnez: opcode = kMipsBeqz; break;
+ default:
+ LOG(FATAL) << "Unexpected branch kind " << (int)opcode;
+ }
+ LIR* hopTarget = NULL;
+ if (!unconditional) {
+ hopTarget = rawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
+ LIR* hopBranch = rawLIR(cUnit, dalvikOffset, opcode, lir->operands[0],
+ lir->operands[1], 0, 0, 0, hopTarget);
+ oatInsertLIRBefore(lir, hopBranch);
+ }
+ LIR* currPC = rawLIR(cUnit, dalvikOffset, kMipsCurrPC);
+ oatInsertLIRBefore(lir, currPC);
+ LIR* anchor = rawLIR(cUnit, dalvikOffset, kPseudoTargetLabel);
+ LIR* deltaHi = rawLIR(cUnit, dalvikOffset, kMipsDeltaHi, r_AT, 0,
+ (uintptr_t)anchor, 0, 0, lir->target);
+ oatInsertLIRBefore(lir, deltaHi);
+ oatInsertLIRBefore(lir, anchor);
+ LIR* deltaLo = rawLIR(cUnit, dalvikOffset, kMipsDeltaLo, r_AT, 0,
+ (uintptr_t)anchor, 0, 0, lir->target);
+ oatInsertLIRBefore(lir, deltaLo);
+ LIR* addu = rawLIR(cUnit, dalvikOffset, kMipsAddu, r_AT, r_AT, r_RA);
+ oatInsertLIRBefore(lir, addu);
+ LIR* jr = rawLIR(cUnit, dalvikOffset, kMipsJr, r_AT);
+ oatInsertLIRBefore(lir, jr);
+ if (!unconditional) {
+ oatInsertLIRBefore(lir, hopTarget);
+ }
+ lir->flags.isNop = true;
}
/*
@@ -518,201 +518,201 @@
* sequence or request that the trace be shortened and retried.
*/
AssemblerStatus oatAssembleInstructions(CompilationUnit *cUnit,
- intptr_t startAddr)
+ intptr_t startAddr)
{
- LIR *lir;
- AssemblerStatus res = kSuccess; // Assume success
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success
- for (lir = (LIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
- if (lir->opcode < 0) {
- continue;
- }
-
-
- if (lir->flags.isNop) {
- continue;
- }
-
- if (lir->flags.pcRelFixup) {
- if (lir->opcode == kMipsDelta) {
- /*
- * The "Delta" pseudo-ops load the difference between
- * two pc-relative locations into a the target register
- * found in operands[0]. The delta is determined by
- * (label2 - label1), where label1 is a standard
- * kPseudoTargetLabel and is stored in operands[2].
- * If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
- * then it is a Switch/Data table.
- */
- int offset1 = ((LIR*)lir->operands[2])->offset;
- SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- if ((delta & 0xffff) == delta) {
- // Fits
- lir->operands[1] = delta;
- } else {
- // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
- LIR *newDeltaHi =
- rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaHi,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
- oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaHi);
- LIR *newDeltaLo =
- rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaLo,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
- oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaLo);
- lir->flags.isNop = true;
- res = kRetryAll;
- }
- } else if (lir->opcode == kMipsDeltaLo) {
- int offset1 = ((LIR*)lir->operands[2])->offset;
- SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = delta & 0xffff;
- } else if (lir->opcode == kMipsDeltaHi) {
- int offset1 = ((LIR*)lir->operands[2])->offset;
- SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
- int offset2 = tabRec ? tabRec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = (delta >> 16) & 0xffff;
- } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- convertShortToLongBranch(cUnit, lir);
- } else {
- lir->operands[0] = delta >> 2;
- }
- } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- convertShortToLongBranch(cUnit, lir);
- } else {
- lir->operands[1] = delta >> 2;
- }
- } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t pc = lir->offset + 4;
- intptr_t target = targetLIR->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- convertShortToLongBranch(cUnit, lir);
- } else {
- lir->operands[2] = delta >> 2;
- }
- } else if (lir->opcode == kMipsJal) {
- intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
- intptr_t target = lir->operands[0];
- /* ensure PC-region branch can be used */
- DCHECK_EQ((curPC & 0xF0000000), (target & 0xF0000000));
- if (target & 0x3) {
- LOG(FATAL) << "Jump target not multiple of 4: " << target;
- }
- lir->operands[0] = target >> 2;
- } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t target = startAddr + targetLIR->offset;
- lir->operands[1] = target >> 16;
- } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
- LIR *targetLIR = (LIR *) lir->target;
- intptr_t target = startAddr + targetLIR->offset;
- lir->operands[2] = lir->operands[2] + target;
- }
- }
-
- /*
- * If one of the pc-relative instructions expanded we'll have
- * to make another pass. Don't bother to fully assemble the
- * instruction.
- */
- if (res != kSuccess) {
- continue;
- }
- const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
- u4 bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
- u4 operand;
- u4 value;
- operand = lir->operands[i];
- switch (encoder->fieldLoc[i].kind) {
- case kFmtUnused:
- break;
- case kFmtBitBlt:
- if (encoder->fieldLoc[i].start == 0 && encoder->fieldLoc[i].end == 31) {
- value = operand;
- } else {
- value = (operand << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
- }
- bits |= value;
- break;
- case kFmtBlt5_2:
- value = (operand & 0x1f);
- bits |= (value << encoder->fieldLoc[i].start);
- bits |= (value << encoder->fieldLoc[i].end);
- break;
- case kFmtDfp: {
- DCHECK(DOUBLEREG(operand));
- DCHECK((operand & 0x1) == 0);
- value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
- bits |= value;
- break;
- }
- case kFmtSfp:
- DCHECK(SINGLEREG(operand));
- value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
- bits |= value;
- break;
- default:
- LOG(FATAL) << "Bad encoder format: "
- << (int)encoder->fieldLoc[i].kind;
- }
- }
- // FIXME: need multi-endian handling here
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
- cUnit->codeBuffer.push_back(bits & 0xff);
- // TUNING: replace with proper delay slot handling
- if (encoder->size == 8) {
- const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
- u4 bits = encoder->skeleton;
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
- cUnit->codeBuffer.push_back(bits & 0xff);
- }
+ for (lir = (LIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
}
- return res;
+
+
+ if (lir->flags.isNop) {
+ continue;
+ }
+
+ if (lir->flags.pcRelFixup) {
+ if (lir->opcode == kMipsDelta) {
+ /*
+ * The "Delta" pseudo-ops load the difference between
+ * two pc-relative locations into a the target register
+ * found in operands[0]. The delta is determined by
+ * (label2 - label1), where label1 is a standard
+ * kPseudoTargetLabel and is stored in operands[2].
+ * If operands[3] is null, then label2 is a kPseudoTargetLabel
+ * and is found in lir->target. If operands[3] is non-NULL,
+ * then it is a Switch/Data table.
+ */
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ if ((delta & 0xffff) == delta) {
+ // Fits
+ lir->operands[1] = delta;
+ } else {
+ // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
+ LIR *newDeltaHi =
+ rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaHi,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], 0, lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaHi);
+ LIR *newDeltaLo =
+ rawLIR(cUnit, lir->dalvikOffset, kMipsDeltaLo,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], 0, lir->target);
+ oatInsertLIRBefore((LIR*)lir, (LIR*)newDeltaLo);
+ lir->flags.isNop = true;
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kMipsDeltaLo) {
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = delta & 0xffff;
+ } else if (lir->opcode == kMipsDeltaHi) {
+ int offset1 = ((LIR*)lir->operands[2])->offset;
+ SwitchTable *tabRec = (SwitchTable*)lir->operands[3];
+ int offset2 = tabRec ? tabRec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = (delta >> 16) & 0xffff;
+ } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[0] = delta >> 2;
+ }
+ } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[1] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t pc = lir->offset + 4;
+ intptr_t target = targetLIR->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ convertShortToLongBranch(cUnit, lir);
+ } else {
+ lir->operands[2] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsJal) {
+ intptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ intptr_t target = lir->operands[0];
+ /* ensure PC-region branch can be used */
+ DCHECK_EQ((curPC & 0xF0000000), (target & 0xF0000000));
+ if (target & 0x3) {
+ LOG(FATAL) << "Jump target not multiple of 4: " << target;
+ }
+ lir->operands[0] = target >> 2;
+ } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t target = startAddr + targetLIR->offset;
+ lir->operands[1] = target >> 16;
+ } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
+ LIR *targetLIR = (LIR *) lir->target;
+ intptr_t target = startAddr + targetLIR->offset;
+ lir->operands[2] = lir->operands[2] + target;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+ u4 bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ u4 operand;
+ u4 value;
+ operand = lir->operands[i];
+ switch (encoder->fieldLoc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtBitBlt:
+ if (encoder->fieldLoc[i].start == 0 && encoder->fieldLoc[i].end == 31) {
+ value = operand;
+ } else {
+ value = (operand << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ }
+ bits |= value;
+ break;
+ case kFmtBlt5_2:
+ value = (operand & 0x1f);
+ bits |= (value << encoder->fieldLoc[i].start);
+ bits |= (value << encoder->fieldLoc[i].end);
+ break;
+ case kFmtDfp: {
+ DCHECK(DOUBLEREG(operand));
+ DCHECK((operand & 0x1) == 0);
+ value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(SINGLEREG(operand));
+ value = ((operand & FP_REG_MASK) << encoder->fieldLoc[i].start) &
+ ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ default:
+ LOG(FATAL) << "Bad encoder format: "
+ << (int)encoder->fieldLoc[i].kind;
+ }
+ }
+ // FIXME: need multi-endian handling here
+ cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ cUnit->codeBuffer.push_back(bits & 0xff);
+ // TUNING: replace with proper delay slot handling
+ if (encoder->size == 8) {
+ const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
+ u4 bits = encoder->skeleton;
+ cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
+ cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ cUnit->codeBuffer.push_back(bits & 0xff);
+ }
+ }
+ return res;
}
int oatGetInsnSize(LIR* lir)
{
- return EncodingMap[lir->opcode].size;
+ return EncodingMap[lir->opcode].size;
}
/*
* Target-dependent offset assignment.
@@ -720,29 +720,29 @@
*/
int oatAssignInsnOffsets(CompilationUnit* cUnit)
{
- LIR* mipsLIR;
- int offset = 0;
+ LIR* mipsLIR;
+ int offset = 0;
- for (mipsLIR = (LIR *) cUnit->firstLIRInsn;
- mipsLIR;
- mipsLIR = NEXT_LIR(mipsLIR)) {
- mipsLIR->offset = offset;
- if (mipsLIR->opcode >= 0) {
- if (!mipsLIR->flags.isNop) {
- offset += mipsLIR->flags.size;
- }
- } else if (mipsLIR->opcode == kPseudoPseudoAlign4) {
- if (offset & 0x2) {
- offset += 2;
- mipsLIR->operands[0] = 1;
- } else {
- mipsLIR->operands[0] = 0;
- }
- }
- /* Pseudo opcodes don't consume space */
+ for (mipsLIR = (LIR *) cUnit->firstLIRInsn;
+ mipsLIR;
+ mipsLIR = NEXT_LIR(mipsLIR)) {
+ mipsLIR->offset = offset;
+ if (mipsLIR->opcode >= 0) {
+ if (!mipsLIR->flags.isNop) {
+ offset += mipsLIR->flags.size;
+ }
+ } else if (mipsLIR->opcode == kPseudoPseudoAlign4) {
+ if (offset & 0x2) {
+ offset += 2;
+ mipsLIR->operands[0] = 1;
+ } else {
+ mipsLIR->operands[0] = 0;
+ }
}
+ /* Pseudo opcodes don't consume space */
+ }
- return offset;
+ return offset;
}
} // namespace art
diff --git a/src/compiler/codegen/mips/Codegen.h b/src/compiler/codegen/mips/Codegen.h
index e11119f..6ddc5ac 100644
--- a/src/compiler/codegen/mips/Codegen.h
+++ b/src/compiler/codegen/mips/Codegen.h
@@ -68,10 +68,10 @@
*/
inline RegisterClass oatRegClassBySize(OpSize size)
{
- return (size == kUnsignedHalf ||
- size == kSignedHalf ||
- size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ return (size == kUnsignedHalf ||
+ size == kSignedHalf ||
+ size == kUnsignedByte ||
+ size == kSignedByte ) ? kCoreReg : kAnyReg;
}
/*
@@ -83,12 +83,12 @@
*/
#if __BYTE_ORDER == __LITTLE_ENDIAN
inline s4 s4FromSwitchData(const void* switchData) {
- return *(s4*) switchData;
+ return *(s4*) switchData;
}
#else
inline s4 s4FromSwitchData(const void* switchData) {
- u2* data = switchData;
- return data[0] | (((s4) data[1]) << 16);
+ u2* data = switchData;
+ return data[0] | (((s4) data[1]) << 16);
}
#endif
@@ -96,7 +96,6 @@
extern void oatSetupResourceMasks(LIR* lir);
-extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest,
- int rSrc);
+extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc);
} // namespace art
diff --git a/src/compiler/codegen/mips/FP/MipsFP.cc b/src/compiler/codegen/mips/FP/MipsFP.cc
index 948ded6..c101039 100644
--- a/src/compiler/codegen/mips/FP/MipsFP.cc
+++ b/src/compiler/codegen/mips/FP/MipsFP.cc
@@ -22,48 +22,48 @@
RegLocation rlSrc1, RegLocation rlSrc2)
{
#ifdef __mips_hard_float
- int op = kMipsNop;
- RegLocation rlResult;
+ int op = kMipsNop;
+ RegLocation rlResult;
- /*
- * Don't attempt to optimize register usage since these opcodes call out to
- * the handlers.
- */
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_FLOAT:
- op = kMipsFadds;
- break;
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT:
- op = kMipsFsubs;
- break;
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT:
- op = kMipsFdivs;
- break;
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT:
- op = kMipsFmuls;
- break;
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT:
- case Instruction::NEG_FLOAT: {
- return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
- }
- default:
- return true;
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kMipsFadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kMipsFsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kMipsFdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kMipsFmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ case Instruction::NEG_FLOAT: {
+ return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
}
- rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR3(cUnit, (MipsOpCode)op, rlResult.lowReg, rlSrc1.lowReg,
- rlSrc2.lowReg);
- storeValue(cUnit, rlDest, rlResult);
+ default:
+ return true;
+ }
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR3(cUnit, (MipsOpCode)op, rlResult.lowReg, rlSrc1.lowReg,
+ rlSrc2.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
- return false;
+ return false;
#else
- return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#endif
}
@@ -72,157 +72,158 @@
RegLocation rlSrc2)
{
#ifdef __mips_hard_float
- int op = kMipsNop;
- RegLocation rlResult;
+ int op = kMipsNop;
+ RegLocation rlResult;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::ADD_DOUBLE:
- op = kMipsFaddd;
- break;
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE:
- op = kMipsFsubd;
- break;
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE:
- op = kMipsFdivd;
- break;
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE:
- op = kMipsFmuld;
- break;
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE:
- case Instruction::NEG_DOUBLE: {
- return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
- }
- default:
- return true;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kMipsFaddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kMipsFsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kMipsFdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kMipsFmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ case Instruction::NEG_DOUBLE: {
+ return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
}
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- newLIR3(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
- S2D(rlSrc1.lowReg, rlSrc1.highReg),
- S2D(rlSrc2.lowReg, rlSrc2.highReg));
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
+ default:
+ return true;
+ }
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ DCHECK(rlSrc1.wide);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+ DCHECK(rlSrc2.wide);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ DCHECK(rlDest.wide);
+ DCHECK(rlResult.wide);
+ newLIR3(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
+ S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
#else
- return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
+ return genArithOpDoublePortable(cUnit, mir, rlDest, rlSrc1, rlSrc2);
#endif
}
static bool genConversion(CompilationUnit *cUnit, MIR *mir)
{
#ifdef __mips_hard_float
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- bool longSrc = false;
- bool longDest = false;
- RegLocation rlSrc;
- RegLocation rlDest;
- int op = kMipsNop;
- int srcReg;
- RegLocation rlResult;
- switch (opcode) {
- case Instruction::INT_TO_FLOAT:
- longSrc = false;
- longDest = false;
- op = kMipsFcvtsw;
- break;
- case Instruction::DOUBLE_TO_FLOAT:
- longSrc = true;
- longDest = false;
- op = kMipsFcvtsd;
- break;
- case Instruction::FLOAT_TO_DOUBLE:
- longSrc = false;
- longDest = true;
- op = kMipsFcvtds;
- break;
- case Instruction::INT_TO_DOUBLE:
- longSrc = false;
- longDest = true;
- op = kMipsFcvtdw;
- break;
- case Instruction::FLOAT_TO_INT:
- case Instruction::DOUBLE_TO_INT:
- case Instruction::LONG_TO_DOUBLE:
- case Instruction::FLOAT_TO_LONG:
- case Instruction::LONG_TO_FLOAT:
- case Instruction::DOUBLE_TO_LONG:
- return genConversionPortable(cUnit, mir);
- default:
- return true;
- }
- if (longSrc) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlSrc = loadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
- }
- if (longDest) {
- rlDest = oatGetDestWide(cUnit, mir, 0, 1);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg), srcReg);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- rlDest = oatGetDest(cUnit, mir, 0);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- newLIR2(cUnit, (MipsOpCode)op, rlResult.lowReg, srcReg);
- storeValue(cUnit, rlDest, rlResult);
- }
- return false;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ bool longSrc = false;
+ bool longDest = false;
+ RegLocation rlSrc;
+ RegLocation rlDest;
+ int op = kMipsNop;
+ int srcReg;
+ RegLocation rlResult;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ longSrc = false;
+ longDest = false;
+ op = kMipsFcvtsw;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ longSrc = true;
+ longDest = false;
+ op = kMipsFcvtsd;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kMipsFcvtds;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ longSrc = false;
+ longDest = true;
+ op = kMipsFcvtdw;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ case Instruction::DOUBLE_TO_INT:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::DOUBLE_TO_LONG:
+ return genConversionPortable(cUnit, mir);
+ default:
+ return true;
+ }
+ if (longSrc) {
+ rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
+ rlSrc = loadValueWide(cUnit, rlSrc, kFPReg);
+ srcReg = S2D(rlSrc.lowReg, rlSrc.highReg);
+ } else {
+ rlSrc = oatGetSrc(cUnit, mir, 0);
+ rlSrc = loadValue(cUnit, rlSrc, kFPReg);
+ srcReg = rlSrc.lowReg;
+ }
+ if (longDest) {
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (MipsOpCode)op, S2D(rlResult.lowReg, rlResult.highReg),
+ srcReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+ } else {
+ rlDest = oatGetDest(cUnit, mir, 0);
+ rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
+ newLIR2(cUnit, (MipsOpCode)op, rlResult.lowReg, srcReg);
+ storeValue(cUnit, rlDest, rlResult);
+ }
+ return false;
#else
- return genConversionPortable(cUnit, mir);
+ return genConversionPortable(cUnit, mir);
#endif
}
static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- bool wide = true;
- int offset;
+ bool wide = true;
+ int offset;
- switch (mir->dalvikInsn.opcode) {
- case Instruction::CMPL_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmplFloat);
- wide = false;
- break;
- case Instruction::CMPG_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmpgFloat);
- wide = false;
- break;
- case Instruction::CMPL_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmplDouble);
- break;
- case Instruction::CMPG_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmpgDouble);
- break;
- default:
- return true;
- }
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit);
- if (wide) {
- loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
- loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
- } else {
- loadValueDirectFixed(cUnit, rlSrc1, rARG0);
- loadValueDirectFixed(cUnit, rlSrc2, rARG1);
- }
- int rTgt = loadHelper(cUnit, offset);
- opReg(cUnit, kOpBlx, rTgt);
- RegLocation rlResult = oatGetReturn(cUnit, false);
- storeValue(cUnit, rlDest, rlResult);
- return false;
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::CMPL_FLOAT:
+ offset = ENTRYPOINT_OFFSET(pCmplFloat);
+ wide = false;
+ break;
+ case Instruction::CMPG_FLOAT:
+ offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+ wide = false;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ offset = ENTRYPOINT_OFFSET(pCmplDouble);
+ break;
+ case Instruction::CMPG_DOUBLE:
+ offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+ break;
+ default:
+ return true;
+ }
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit);
+ if (wide) {
+ loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
+ } else {
+ loadValueDirectFixed(cUnit, rlSrc1, rARG0);
+ loadValueDirectFixed(cUnit, rlSrc2, rARG1);
+ }
+ int rTgt = loadHelper(cUnit, offset);
+ opReg(cUnit, kOpBlx, rTgt);
+ RegLocation rlResult = oatGetReturn(cUnit, false);
+ storeValue(cUnit, rlDest, rlResult);
+ return false;
}
} // namespace art
diff --git a/src/compiler/codegen/mips/Mips32/Factory.cc b/src/compiler/codegen/mips/Mips32/Factory.cc
index 1162702..0a7dd9d 100644
--- a/src/compiler/codegen/mips/Mips32/Factory.cc
+++ b/src/compiler/codegen/mips/Mips32/Factory.cc
@@ -52,32 +52,32 @@
#ifdef __mips_hard_float
LIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- int opcode;
- /* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
- if (DOUBLEREG(rDest)) {
- opcode = kMipsFmovd;
+ int opcode;
+ /* must be both DOUBLE or both not DOUBLE */
+ DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
+ if (DOUBLEREG(rDest)) {
+ opcode = kMipsFmovd;
+ } else {
+ if (SINGLEREG(rDest)) {
+ if (SINGLEREG(rSrc)) {
+ opcode = kMipsFmovs;
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ int tOpnd = rSrc;
+ rSrc = rDest;
+ rDest = tOpnd;
+ opcode = kMipsMtc1;
+ }
} else {
- if (SINGLEREG(rDest)) {
- if (SINGLEREG(rSrc)) {
- opcode = kMipsFmovs;
- } else {
- /* note the operands are swapped for the mtc1 instr */
- int tOpnd = rSrc;
- rSrc = rDest;
- rDest = tOpnd;
- opcode = kMipsMtc1;
- }
- } else {
- DCHECK(SINGLEREG(rSrc));
- opcode = kMipsMfc1;
- }
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kMipsMfc1;
}
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rSrc, rDest);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ }
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rSrc, rDest);
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
#endif
@@ -90,445 +90,443 @@
* 1) rDest is freshly returned from oatAllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest,
- int value)
+LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value)
{
- LIR *res;
+ LIR *res;
#ifdef __mips_hard_float
- int rDestSave = rDest;
- int isFpReg = FPREG(rDest);
- if (isFpReg) {
- DCHECK(SINGLEREG(rDest));
- rDest = oatAllocTemp(cUnit);
- }
+ int rDestSave = rDest;
+ int isFpReg = FPREG(rDest);
+ if (isFpReg) {
+ DCHECK(SINGLEREG(rDest));
+ rDest = oatAllocTemp(cUnit);
+ }
#endif
- /* See if the value can be constructed cheaply */
- if (value == 0) {
- res = newLIR2(cUnit, kMipsMove, rDest, r_ZERO);
- } else if ((value > 0) && (value <= 65535)) {
- res = newLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
- } else if ((value < 0) && (value >= -32768)) {
- res = newLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
- } else {
- res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
- if (value & 0xffff)
- newLIR3(cUnit, kMipsOri, rDest, rDest, value);
- }
+ /* See if the value can be constructed cheaply */
+ if (value == 0) {
+ res = newLIR2(cUnit, kMipsMove, rDest, r_ZERO);
+ } else if ((value > 0) && (value <= 65535)) {
+ res = newLIR3(cUnit, kMipsOri, rDest, r_ZERO, value);
+ } else if ((value < 0) && (value >= -32768)) {
+ res = newLIR3(cUnit, kMipsAddiu, rDest, r_ZERO, value);
+ } else {
+ res = newLIR2(cUnit, kMipsLui, rDest, value>>16);
+ if (value & 0xffff)
+ newLIR3(cUnit, kMipsOri, rDest, rDest, value);
+ }
#ifdef __mips_hard_float
- if (isFpReg) {
- newLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
- oatFreeTemp(cUnit, rDest);
- }
+ if (isFpReg) {
+ newLIR2(cUnit, kMipsMtc1, rDest, rDestSave);
+ oatFreeTemp(cUnit, rDest);
+ }
#endif
- return res;
+ return res;
}
LIR *opBranchUnconditional(CompilationUnit *cUnit, OpKind op)
{
- DCHECK_EQ(op, kOpUncondBr);
- return newLIR1(cUnit, kMipsB, 0 /* offset to be patched */ );
+ DCHECK_EQ(op, kOpUncondBr);
+ return newLIR1(cUnit, kMipsB, 0 /* offset to be patched */ );
}
LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask);
LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
{
- MipsOpCode opcode = kMipsNop;
- switch (op) {
- case kOpBlx:
- opcode = kMipsJalr;
- break;
- case kOpBx:
- return newLIR1(cUnit, kMipsJr, rDestSrc);
- break;
- default:
- LOG(FATAL) << "Bad case in opReg";
- }
- return newLIR2(cUnit, opcode, r_RA, rDestSrc);
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpBlx:
+ opcode = kMipsJalr;
+ break;
+ case kOpBx:
+ return newLIR1(cUnit, kMipsJr, rDestSrc);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opReg";
+ }
+ return newLIR2(cUnit, opcode, r_RA, rDestSrc);
}
LIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int value);
+ int rSrc1, int value);
LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
- int value)
+ int value)
{
- LIR *res;
- bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- bool shortForm = (absValue & 0xff) == absValue;
- MipsOpCode opcode = kMipsNop;
- switch (op) {
- case kOpAdd:
- return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
- break;
- case kOpSub:
- return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
- break;
- default:
- LOG(FATAL) << "Bad case in opRegImm";
- break;
- }
- if (shortForm)
- res = newLIR2(cUnit, opcode, rDestSrc1, absValue);
- else {
- int rScratch = oatAllocTemp(cUnit);
- res = loadConstant(cUnit, rScratch, value);
- if (op == kOpCmp)
- newLIR2(cUnit, opcode, rDestSrc1, rScratch);
- else
- newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rScratch);
- }
- return res;
+ LIR *res;
+ bool neg = (value < 0);
+ int absValue = (neg) ? -value : value;
+ bool shortForm = (absValue & 0xff) == absValue;
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ break;
+ case kOpSub:
+ return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opRegImm";
+ break;
+ }
+ if (shortForm)
+ res = newLIR2(cUnit, opcode, rDestSrc1, absValue);
+ else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rScratch, value);
+ if (op == kOpCmp)
+ newLIR2(cUnit, opcode, rDestSrc1, rScratch);
+ else
+ newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rScratch);
+ }
+ return res;
}
LIR *opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int rSrc2)
+ int rSrc1, int rSrc2)
{
- MipsOpCode opcode = kMipsNop;
- switch (op) {
- case kOpAdd:
- opcode = kMipsAddu;
- break;
- case kOpSub:
- opcode = kMipsSubu;
- break;
- case kOpAnd:
- opcode = kMipsAnd;
- break;
- case kOpMul:
- opcode = kMipsMul;
- break;
- case kOpOr:
- opcode = kMipsOr;
- break;
- case kOpXor:
- opcode = kMipsXor;
- break;
- case kOpLsl:
- opcode = kMipsSllv;
- break;
- case kOpLsr:
- opcode = kMipsSrlv;
- break;
- case kOpAsr:
- opcode = kMipsSrav;
- break;
- case kOpAdc:
- case kOpSbc:
- LOG(FATAL) << "No carry bit on MIPS";
- break;
- default:
- LOG(FATAL) << "bad case in opRegRegReg";
- break;
- }
- return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ opcode = kMipsAddu;
+ break;
+ case kOpSub:
+ opcode = kMipsSubu;
+ break;
+ case kOpAnd:
+ opcode = kMipsAnd;
+ break;
+ case kOpMul:
+ opcode = kMipsMul;
+ break;
+ case kOpOr:
+ opcode = kMipsOr;
+ break;
+ case kOpXor:
+ opcode = kMipsXor;
+ break;
+ case kOpLsl:
+ opcode = kMipsSllv;
+ break;
+ case kOpLsr:
+ opcode = kMipsSrlv;
+ break;
+ case kOpAsr:
+ opcode = kMipsSrav;
+ break;
+ case kOpAdc:
+ case kOpSbc:
+ LOG(FATAL) << "No carry bit on MIPS";
+ break;
+ default:
+ LOG(FATAL) << "bad case in opRegRegReg";
+ break;
+ }
+ return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
}
LIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
- int rSrc1, int value)
+ int rSrc1, int value)
{
- LIR *res;
- MipsOpCode opcode = kMipsNop;
- bool shortForm = true;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = true;
- switch (op) {
- case kOpAdd:
- if (IS_SIMM16(value)) {
- opcode = kMipsAddiu;
- }
- else {
- shortForm = false;
- opcode = kMipsAddu;
- }
- break;
- case kOpSub:
- if (IS_SIMM16((-value))) {
- value = -value;
- opcode = kMipsAddiu;
- }
- else {
- shortForm = false;
- opcode = kMipsSubu;
- }
- break;
- case kOpLsl:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSll;
- break;
- case kOpLsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSrl;
- break;
- case kOpAsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSra;
- break;
- case kOpAnd:
- if (IS_UIMM16((value))) {
- opcode = kMipsAndi;
- }
- else {
- shortForm = false;
- opcode = kMipsAnd;
- }
- break;
- case kOpOr:
- if (IS_UIMM16((value))) {
- opcode = kMipsOri;
- }
- else {
- shortForm = false;
- opcode = kMipsOr;
- }
- break;
- case kOpXor:
- if (IS_UIMM16((value))) {
- opcode = kMipsXori;
- }
- else {
- shortForm = false;
- opcode = kMipsXor;
- }
- break;
- case kOpMul:
- shortForm = false;
- opcode = kMipsMul;
- break;
- default:
- LOG(FATAL) << "Bad case in opRegRegImm";
- break;
- }
+ switch (op) {
+ case kOpAdd:
+ if (IS_SIMM16(value)) {
+ opcode = kMipsAddiu;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsAddu;
+ }
+ break;
+ case kOpSub:
+ if (IS_SIMM16((-value))) {
+ value = -value;
+ opcode = kMipsAddiu;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsSubu;
+ }
+ break;
+ case kOpLsl:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSll;
+ break;
+ case kOpLsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSrl;
+ break;
+ case kOpAsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSra;
+ break;
+ case kOpAnd:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsAndi;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsAnd;
+ }
+ break;
+ case kOpOr:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsOri;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsOr;
+ }
+ break;
+ case kOpXor:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsXori;
+ }
+ else {
+ shortForm = false;
+ opcode = kMipsXor;
+ }
+ break;
+ case kOpMul:
+ shortForm = false;
+ opcode = kMipsMul;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in opRegRegImm";
+ break;
+ }
- if (shortForm)
- res = newLIR3(cUnit, opcode, rDest, rSrc1, value);
- else {
- if (rDest != rSrc1) {
- res = loadConstant(cUnit, rDest, value);
- newLIR3(cUnit, opcode, rDest, rSrc1, rDest);
- } else {
- int rScratch = oatAllocTemp(cUnit);
- res = loadConstant(cUnit, rScratch, value);
- newLIR3(cUnit, opcode, rDest, rSrc1, rScratch);
- }
+ if (shortForm)
+ res = newLIR3(cUnit, opcode, rDest, rSrc1, value);
+ else {
+ if (rDest != rSrc1) {
+ res = loadConstant(cUnit, rDest, value);
+ newLIR3(cUnit, opcode, rDest, rSrc1, rDest);
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = loadConstant(cUnit, rScratch, value);
+ newLIR3(cUnit, opcode, rDest, rSrc1, rScratch);
}
- return res;
+ }
+ return res;
}
-LIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
- int rSrc2)
+LIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int rSrc2)
{
- MipsOpCode opcode = kMipsNop;
- LIR *res;
- switch (op) {
- case kOpMov:
- opcode = kMipsMove;
- break;
- case kOpMvn:
- return newLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
- case kOpNeg:
- return newLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
- case kOpAdd:
- case kOpAnd:
- case kOpMul:
- case kOpOr:
- case kOpSub:
- case kOpXor:
- return opRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
- case kOp2Byte:
+ MipsOpCode opcode = kMipsNop;
+ LIR *res;
+ switch (op) {
+ case kOpMov:
+ opcode = kMipsMove;
+ break;
+ case kOpMvn:
+ return newLIR3(cUnit, kMipsNor, rDestSrc1, rSrc2, r_ZERO);
+ case kOpNeg:
+ return newLIR3(cUnit, kMipsSubu, rDestSrc1, r_ZERO, rSrc2);
+ case kOpAdd:
+ case kOpAnd:
+ case kOpMul:
+ case kOpOr:
+ case kOpSub:
+ case kOpXor:
+ return opRegRegReg(cUnit, op, rDestSrc1, rDestSrc1, rSrc2);
+ case kOp2Byte:
#if __mips_isa_rev>=2
- res = newLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
+ res = newLIR2(cUnit, kMipsSeb, rDestSrc1, rSrc2);
#else
- res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
- opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
+ res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 24);
+ opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 24);
#endif
- return res;
- case kOp2Short:
+ return res;
+ case kOp2Short:
#if __mips_isa_rev>=2
- res = newLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
+ res = newLIR2(cUnit, kMipsSeh, rDestSrc1, rSrc2);
#else
- res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
- opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
+ res = opRegRegImm(cUnit, kOpLsl, rDestSrc1, rSrc2, 16);
+ opRegRegImm(cUnit, kOpAsr, rDestSrc1, rDestSrc1, 16);
#endif
- return res;
- case kOp2Char:
- return newLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
- default:
- LOG(FATAL) << "Bad case in opRegReg";
- break;
- }
- return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ return res;
+ case kOp2Char:
+ return newLIR3(cUnit, kMipsAndi, rDestSrc1, rSrc2, 0xFFFF);
+ default:
+ LOG(FATAL) << "Bad case in opRegReg";
+ break;
+ }
+ return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
}
LIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
- int rDestHi, int valLo, int valHi)
+ int rDestHi, int valLo, int valHi)
{
- LIR *res;
- res = loadConstantNoClobber(cUnit, rDestLo, valLo);
- loadConstantNoClobber(cUnit, rDestHi, valHi);
- return res;
+ LIR *res;
+ res = loadConstantNoClobber(cUnit, rDestLo, valLo);
+ loadConstantNoClobber(cUnit, rDestHi, valHi);
+ return res;
}
/* Load value from base + scaled index. */
LIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
- int rIndex, int rDest, int scale, OpSize size)
+ int rIndex, int rDest, int scale, OpSize size)
{
- LIR *first = NULL;
- LIR *res;
- MipsOpCode opcode = kMipsNop;
- int tReg = oatAllocTemp(cUnit);
+ LIR *first = NULL;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ int tReg = oatAllocTemp(cUnit);
#ifdef __mips_hard_float
- if (FPREG(rDest)) {
- DCHECK(SINGLEREG(rDest));
- DCHECK((size == kWord) || (size == kSingle));
- size = kSingle;
- } else {
- if (size == kSingle)
- size = kWord;
- }
+ if (FPREG(rDest)) {
+ DCHECK(SINGLEREG(rDest));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
#endif
- if (!scale) {
- first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
- } else {
- first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
- }
+ if (!scale) {
+ first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ }
- switch (size) {
+ switch (size) {
#ifdef __mips_hard_float
- case kSingle:
- opcode = kMipsFlwc1;
- break;
+ case kSingle:
+ opcode = kMipsFlwc1;
+ break;
#endif
- case kWord:
- opcode = kMipsLw;
- break;
- case kUnsignedHalf:
- opcode = kMipsLhu;
- break;
- case kSignedHalf:
- opcode = kMipsLh;
- break;
- case kUnsignedByte:
- opcode = kMipsLbu;
- break;
- case kSignedByte:
- opcode = kMipsLb;
- break;
- default:
- LOG(FATAL) << "Bad case in loadBaseIndexed";
- }
+ case kWord:
+ opcode = kMipsLw;
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexed";
+ }
- res = newLIR3(cUnit, opcode, rDest, 0, tReg);
- oatFreeTemp(cUnit, tReg);
- return (first) ? first : res;
+ res = newLIR3(cUnit, opcode, rDest, 0, tReg);
+ oatFreeTemp(cUnit, tReg);
+ return (first) ? first : res;
}
/* store value base base + scaled index. */
LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase,
- int rIndex, int rSrc, int scale, OpSize size)
+ int rIndex, int rSrc, int scale, OpSize size)
{
- LIR *first = NULL;
- LIR *res;
- MipsOpCode opcode = kMipsNop;
- int rNewIndex = rIndex;
- int tReg = oatAllocTemp(cUnit);
+ LIR *first = NULL;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ int rNewIndex = rIndex;
+ int tReg = oatAllocTemp(cUnit);
#ifdef __mips_hard_float
- if (FPREG(rSrc)) {
- DCHECK(SINGLEREG(rSrc));
- DCHECK((size == kWord) || (size == kSingle));
- size = kSingle;
- } else {
- if (size == kSingle)
- size = kWord;
- }
+ if (FPREG(rSrc)) {
+ DCHECK(SINGLEREG(rSrc));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
#endif
- if (!scale) {
- first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
- } else {
- first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
- }
+ if (!scale) {
+ first = newLIR3(cUnit, kMipsAddu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kMipsAddu, tReg , rBase, tReg);
+ }
- switch (size) {
+ switch (size) {
#ifdef __mips_hard_float
- case kSingle:
- opcode = kMipsFswc1;
- break;
+ case kSingle:
+ opcode = kMipsFswc1;
+ break;
#endif
- case kWord:
- opcode = kMipsSw;
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMipsSh;
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMipsSb;
- break;
- default:
- LOG(FATAL) << "Bad case in storeBaseIndexed";
- }
- res = newLIR3(cUnit, opcode, rSrc, 0, tReg);
- oatFreeTemp(cUnit, rNewIndex);
- return first;
+ case kWord:
+ opcode = kMipsSw;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in storeBaseIndexed";
+ }
+ res = newLIR3(cUnit, opcode, rSrc, 0, tReg);
+ oatFreeTemp(cUnit, rNewIndex);
+ return first;
}
LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- int i;
- int loadCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int loadCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
- newLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
- loadCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+ newLIR3(cUnit, kMipsLw, i+r_A0, loadCnt*4, rBase);
+ loadCnt++;
}
+ }
- if (loadCnt) {/* increment after */
- newLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
- }
+ if (loadCnt) {/* increment after */
+ newLIR3(cUnit, kMipsAddiu, rBase, rBase, loadCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
}
LIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- int i;
- int storeCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int storeCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
- newLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
- storeCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) { /* map r0 to MIPS r_A0 */
+ newLIR3(cUnit, kMipsSw, i+r_A0, storeCnt*4, rBase);
+ storeCnt++;
}
+ }
- if (storeCnt) { /* increment after */
- newLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
- }
+ if (storeCnt) { /* increment after */
+ newLIR3(cUnit, kMipsAddiu, rBase, rBase, storeCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
}
LIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
- int displacement, int rDest, int rDestHi,
- OpSize size, int sReg)
+ int displacement, int rDest, int rDestHi,
+ OpSize size, int sReg)
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated sReg and MIR). If not
@@ -538,221 +536,222 @@
* rlp and then restore.
*/
{
- LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
- MipsOpCode opcode = kMipsNop;
- bool shortForm = IS_SIMM16(displacement);
- bool pair = false;
+ LIR *res;
+ LIR *load = NULL;
+ LIR *load2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = IS_SIMM16(displacement);
+ bool pair = false;
- switch (size) {
- case kLong:
- case kDouble:
- pair = true;
- opcode = kMipsLw;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsLw;
#ifdef __mips_hard_float
- if (FPREG(rDest)) {
- opcode = kMipsFlwc1;
- if (DOUBLEREG(rDest)) {
- rDest = rDest - FP_DOUBLE;
- } else {
- DCHECK(FPREG(rDestHi));
- DCHECK(rDest == (rDestHi - 1));
- }
- rDestHi = rDest + 1;
- }
-#endif
- shortForm = IS_SIMM16_2WORD(displacement);
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kWord:
- case kSingle:
- opcode = kMipsLw;
-#ifdef __mips_hard_float
- if (FPREG(rDest)) {
- opcode = kMipsFlwc1;
- DCHECK(SINGLEREG(rDest));
- }
-#endif
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- opcode = kMipsLhu;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kSignedHalf:
- opcode = kMipsLh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- opcode = kMipsLbu;
- break;
- case kSignedByte:
- opcode = kMipsLb;
- break;
- default:
- LOG(FATAL) << "Bad case in loadBaseIndexedBody";
- }
-
- if (shortForm) {
- if (!pair) {
- load = res = newLIR3(cUnit, opcode, rDest, displacement, rBase);
+ if (FPREG(rDest)) {
+ opcode = kMipsFlwc1;
+ if (DOUBLEREG(rDest)) {
+ rDest = rDest - FP_DOUBLE;
} else {
- load = res = newLIR3(cUnit, opcode, rDest, displacement + LOWORD_OFFSET, rBase);
- load2 = newLIR3(cUnit, opcode, rDestHi, displacement + HIWORD_OFFSET, rBase);
+ DCHECK(FPREG(rDestHi));
+ DCHECK(rDest == (rDestHi - 1));
}
+ rDestHi = rDest + 1;
+ }
+#endif
+ shortForm = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsLw;
+#ifdef __mips_hard_float
+ if (FPREG(rDest)) {
+ opcode = kMipsFlwc1;
+ DCHECK(SINGLEREG(rDest));
+ }
+#endif
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexedBody";
+ }
+
+ if (shortForm) {
+ if (!pair) {
+ load = res = newLIR3(cUnit, opcode, rDest, displacement, rBase);
} else {
- if (pair) {
- int rTmp = oatAllocFreeTemp(cUnit);
- res = opRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
- load = newLIR3(cUnit, opcode, rDest, LOWORD_OFFSET, rTmp);
- load2 = newLIR3(cUnit, opcode, rDestHi, HIWORD_OFFSET, rTmp);
- oatFreeTemp(cUnit, rTmp);
- } else {
- int rTmp = (rBase == rDest) ? oatAllocFreeTemp(cUnit)
- : rDest;
- res = loadConstant(cUnit, rTmp, displacement);
- load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
- if (rTmp != rDest)
- oatFreeTemp(cUnit, rTmp);
- }
+ load = res = newLIR3(cUnit, opcode, rDest,
+ displacement + LOWORD_OFFSET, rBase);
+ load2 = newLIR3(cUnit, opcode, rDestHi,
+ displacement + HIWORD_OFFSET, rBase);
}
+ } else {
+ if (pair) {
+ int rTmp = oatAllocFreeTemp(cUnit);
+ res = opRegRegImm(cUnit, kOpAdd, rTmp, rBase, displacement);
+ load = newLIR3(cUnit, opcode, rDest, LOWORD_OFFSET, rTmp);
+ load2 = newLIR3(cUnit, opcode, rDestHi, HIWORD_OFFSET, rTmp);
+ oatFreeTemp(cUnit, rTmp);
+ } else {
+ int rTmp = (rBase == rDest) ? oatAllocFreeTemp(cUnit) : rDest;
+ res = loadConstant(cUnit, rTmp, displacement);
+ load = newLIR3(cUnit, opcode, rDest, rBase, rTmp);
+ if (rTmp != rDest)
+ oatFreeTemp(cUnit, rTmp);
+ }
+ }
- if (rBase == rSP) {
- annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- true /* isLoad */, pair /* is64bit */);
- if (pair) {
- annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
- true /* isLoad */, pair /* is64bit */);
- }
+ if (rBase == rSP) {
+ annotateDalvikRegAccess(load,
+ (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ true /* isLoad */, pair /* is64bit */);
+ if (pair) {
+ annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+ true /* isLoad */, pair /* is64bit */);
}
- return load;
+ }
+ return load;
}
LIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir, int rBase,
- int displacement, int rDest, OpSize size,
- int sReg)
+ int displacement, int rDest, OpSize size, int sReg)
{
- return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
- size, sReg);
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
+ size, sReg);
}
LIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir, int rBase,
- int displacement, int rDestLo, int rDestHi,
- int sReg)
+ int displacement, int rDestLo, int rDestHi, int sReg)
{
- return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
- kLong, sReg);
+ return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
+ kLong, sReg);
}
LIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc, int rSrcHi,
- OpSize size)
+ int displacement, int rSrc, int rSrcHi, OpSize size)
{
- LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
- MipsOpCode opcode = kMipsNop;
- bool shortForm = IS_SIMM16(displacement);
- bool pair = false;
+ LIR *res;
+ LIR *store = NULL;
+ LIR *store2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool shortForm = IS_SIMM16(displacement);
+ bool pair = false;
- switch (size) {
- case kLong:
- case kDouble:
- pair = true;
- opcode = kMipsSw;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsSw;
#ifdef __mips_hard_float
- if (FPREG(rSrc)) {
- opcode = kMipsFswc1;
- if (DOUBLEREG(rSrc)) {
- rSrc = rSrc - FP_DOUBLE;
- } else {
- DCHECK(FPREG(rSrcHi));
- DCHECK_EQ(rSrc, (rSrcHi - 1));
- }
- rSrcHi = rSrc + 1;
- }
-#endif
- shortForm = IS_SIMM16_2WORD(displacement);
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kWord:
- case kSingle:
- opcode = kMipsSw;
-#ifdef __mips_hard_float
- if (FPREG(rSrc)) {
- opcode = kMipsFswc1;
- DCHECK(SINGLEREG(rSrc));
- }
-#endif
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMipsSh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMipsSb;
- break;
- default:
- LOG(FATAL) << "Bad case in storeBaseIndexedBody";
- }
-
- if (shortForm) {
- if (!pair) {
- store = res = newLIR3(cUnit, opcode, rSrc, displacement, rBase);
+ if (FPREG(rSrc)) {
+ opcode = kMipsFswc1;
+ if (DOUBLEREG(rSrc)) {
+ rSrc = rSrc - FP_DOUBLE;
} else {
- store = res = newLIR3(cUnit, opcode, rSrc, displacement + LOWORD_OFFSET, rBase);
- store2 = newLIR3(cUnit, opcode, rSrcHi, displacement + HIWORD_OFFSET, rBase);
+ DCHECK(FPREG(rSrcHi));
+ DCHECK_EQ(rSrc, (rSrcHi - 1));
}
+ rSrcHi = rSrc + 1;
+ }
+#endif
+ shortForm = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsSw;
+#ifdef __mips_hard_float
+ if (FPREG(rSrc)) {
+ opcode = kMipsFswc1;
+ DCHECK(SINGLEREG(rSrc));
+ }
+#endif
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in storeBaseIndexedBody";
+ }
+
+ if (shortForm) {
+ if (!pair) {
+ store = res = newLIR3(cUnit, opcode, rSrc, displacement, rBase);
} else {
- int rScratch = oatAllocTemp(cUnit);
- res = opRegRegImm(cUnit, kOpAdd, rScratch, rBase, displacement);
- if (!pair) {
- store = newLIR3(cUnit, opcode, rSrc, 0, rScratch);
- } else {
- store = newLIR3(cUnit, opcode, rSrc, LOWORD_OFFSET, rScratch);
- store2 = newLIR3(cUnit, opcode, rSrcHi, HIWORD_OFFSET, rScratch);
- }
- oatFreeTemp(cUnit, rScratch);
+ store = res = newLIR3(cUnit, opcode, rSrc, displacement + LOWORD_OFFSET,
+ rBase);
+ store2 = newLIR3(cUnit, opcode, rSrcHi, displacement + HIWORD_OFFSET,
+ rBase);
}
-
- if (rBase == rSP) {
- annotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- false /* isLoad */, pair /* is64bit */);
- if (pair) {
- annotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
- false /* isLoad */, pair /* is64bit */);
- }
+ } else {
+ int rScratch = oatAllocTemp(cUnit);
+ res = opRegRegImm(cUnit, kOpAdd, rScratch, rBase, displacement);
+ if (!pair) {
+ store = newLIR3(cUnit, opcode, rSrc, 0, rScratch);
+ } else {
+ store = newLIR3(cUnit, opcode, rSrc, LOWORD_OFFSET, rScratch);
+ store2 = newLIR3(cUnit, opcode, rSrcHi, HIWORD_OFFSET, rScratch);
}
+ oatFreeTemp(cUnit, rScratch);
+ }
- return res;
+ if (rBase == rSP) {
+ annotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
+ >> 2, false /* isLoad */, pair /* is64bit */);
+ if (pair) {
+ annotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+ false /* isLoad */, pair /* is64bit */);
+ }
+ }
+
+ return res;
}
LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrc, OpSize size)
+ int displacement, int rSrc, OpSize size)
{
- return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
}
LIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase,
- int displacement, int rSrcLo, int rSrcHi)
+ int displacement, int rSrcLo, int rSrcHi)
{
- return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+ return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
}
void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- storeWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
- storeWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
+ storeWordDisp(cUnit, base, LOWORD_OFFSET, lowReg);
+ storeWordDisp(cUnit, base, HIWORD_OFFSET, highReg);
}
void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- loadWordDisp(cUnit, base, LOWORD_OFFSET , lowReg);
- loadWordDisp(cUnit, base, HIWORD_OFFSET , highReg);
+ loadWordDisp(cUnit, base, LOWORD_OFFSET , lowReg);
+ loadWordDisp(cUnit, base, HIWORD_OFFSET , highReg);
}
} // namespace art
diff --git a/src/compiler/codegen/mips/Mips32/Gen.cc b/src/compiler/codegen/mips/Mips32/Gen.cc
index ade2fd8..46c90f8 100644
--- a/src/compiler/codegen/mips/Mips32/Gen.cc
+++ b/src/compiler/codegen/mips/Mips32/Gen.cc
@@ -64,69 +64,69 @@
*
*/
void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
- LIR* labelList)
+ LIR* labelList)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- if (cUnit->printMe) {
- dumpSparseSwitchTable(table);
- }
- // Add the table to the list - we'll process it later
- SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- int elements = table[1];
- tabRec->targets = (LIR* *)oatNew(cUnit, elements * sizeof(LIR*), true,
- kAllocLIR);
- oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpSparseSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int elements = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, elements * sizeof(LIR*), true,
+ kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
- // The table is composed of 8-byte key/disp pairs
- int byteSize = elements * 8;
+ // The table is composed of 8-byte key/disp pairs
+ int byteSize = elements * 8;
- int sizeHi = byteSize >> 16;
- int sizeLo = byteSize & 0xffff;
+ int sizeHi = byteSize >> 16;
+ int sizeLo = byteSize & 0xffff;
- int rEnd = oatAllocTemp(cUnit);
- if (sizeHi) {
- newLIR2(cUnit, kMipsLui, rEnd, sizeHi);
- }
- // Must prevent code motion for the curr pc pair
- genBarrier(cUnit); // Scheduling barrier
- newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot
- if (sizeHi) {
- newLIR3(cUnit, kMipsOri, rEnd, rEnd, sizeLo);
- } else {
- newLIR3(cUnit, kMipsOri, rEnd, r_ZERO, sizeLo);
- }
- genBarrier(cUnit); // Scheduling barrier
+ int rEnd = oatAllocTemp(cUnit);
+ if (sizeHi) {
+ newLIR2(cUnit, kMipsLui, rEnd, sizeHi);
+ }
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit); // Scheduling barrier
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot
+ if (sizeHi) {
+ newLIR3(cUnit, kMipsOri, rEnd, rEnd, sizeLo);
+ } else {
+ newLIR3(cUnit, kMipsOri, rEnd, r_ZERO, sizeLo);
+ }
+ genBarrier(cUnit); // Scheduling barrier
- // Construct BaseLabel and set up table base register
- LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
- tabRec->anchor = baseLabel;
- int rBase = oatAllocTemp(cUnit);
- newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
- opRegRegReg(cUnit, kOpAdd, rEnd, rEnd, rBase);
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tabRec->anchor = baseLabel;
+ int rBase = oatAllocTemp(cUnit);
+ newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+ opRegRegReg(cUnit, kOpAdd, rEnd, rEnd, rBase);
- // Grab switch test value
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ // Grab switch test value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- // Test loop
- int rKey = oatAllocTemp(cUnit);
- LIR* loopLabel = newLIR0(cUnit, kPseudoTargetLabel);
- LIR* exitBranch = opCmpBranch(cUnit , kCondEq, rBase, rEnd, NULL);
- loadWordDisp(cUnit, rBase, 0, rKey);
- opRegImm(cUnit, kOpAdd, rBase, 8);
- opCmpBranch(cUnit, kCondNe, rlSrc.lowReg, rKey, loopLabel);
- int rDisp = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rBase, -4, rDisp);
- opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
- opReg(cUnit, kOpBx, r_RA);
+ // Test loop
+ int rKey = oatAllocTemp(cUnit);
+ LIR* loopLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ LIR* exitBranch = opCmpBranch(cUnit , kCondEq, rBase, rEnd, NULL);
+ loadWordDisp(cUnit, rBase, 0, rKey);
+ opRegImm(cUnit, kOpAdd, rBase, 8);
+ opCmpBranch(cUnit, kCondNe, rlSrc.lowReg, rKey, loopLabel);
+ int rDisp = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rBase, -4, rDisp);
+ opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
+ opReg(cUnit, kOpBx, r_RA);
- // Loop exit
- LIR* exitLabel = newLIR0(cUnit, kPseudoTargetLabel);
- exitBranch->target = exitLabel;
+ // Loop exit
+ LIR* exitLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ exitBranch->target = exitLabel;
}
/*
@@ -144,75 +144,75 @@
*/
void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- if (cUnit->printMe) {
- dumpPackedSwitchTable(table);
- }
- // Add the table to the list - we'll process it later
- SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- int size = table[1];
- tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
- kAllocLIR);
- oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ if (cUnit->printMe) {
+ dumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ int size = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
+ kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
- // Get the switch value
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ // Get the switch value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- // Prepare the bias. If too big, handle 1st stage here
- int lowKey = s4FromSwitchData(&table[2]);
- bool largeBias = false;
- int rKey;
- if (lowKey == 0) {
- rKey = rlSrc.lowReg;
- } else if ((lowKey & 0xffff) != lowKey) {
- rKey = oatAllocTemp(cUnit);
- loadConstant(cUnit, rKey, lowKey);
- largeBias = true;
+ // Prepare the bias. If too big, handle 1st stage here
+ int lowKey = s4FromSwitchData(&table[2]);
+ bool largeBias = false;
+ int rKey;
+ if (lowKey == 0) {
+ rKey = rlSrc.lowReg;
+ } else if ((lowKey & 0xffff) != lowKey) {
+ rKey = oatAllocTemp(cUnit);
+ loadConstant(cUnit, rKey, lowKey);
+ largeBias = true;
+ } else {
+ rKey = oatAllocTemp(cUnit);
+ }
+
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit);
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with bias strip
+ if (lowKey == 0) {
+ newLIR0(cUnit, kMipsNop);
+ } else {
+ if (largeBias) {
+ opRegRegReg(cUnit, kOpSub, rKey, rlSrc.lowReg, rKey);
} else {
- rKey = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpSub, rKey, rlSrc.lowReg, lowKey);
}
+ }
+ genBarrier(cUnit); // Scheduling barrier
- // Must prevent code motion for the curr pc pair
- genBarrier(cUnit);
- newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot with bias strip
- if (lowKey == 0) {
- newLIR0(cUnit, kMipsNop);
- } else {
- if (largeBias) {
- opRegRegReg(cUnit, kOpSub, rKey, rlSrc.lowReg, rKey);
- } else {
- opRegRegImm(cUnit, kOpSub, rKey, rlSrc.lowReg, lowKey);
- }
- }
- genBarrier(cUnit); // Scheduling barrier
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tabRec->anchor = baseLabel;
- // Construct BaseLabel and set up table base register
- LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
- tabRec->anchor = baseLabel;
+ // Bounds check - if < 0 or >= size continue following switch
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondHi, rKey, size-1, NULL);
- // Bounds check - if < 0 or >= size continue following switch
- LIR* branchOver = opCmpImmBranch(cUnit, kCondHi, rKey, size-1, NULL);
+ // Materialize the table base pointer
+ int rBase = oatAllocTemp(cUnit);
+ newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
- // Materialize the table base pointer
- int rBase = oatAllocTemp(cUnit);
- newLIR4(cUnit, kMipsDelta, rBase, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+ // Load the displacement from the switch table
+ int rDisp = oatAllocTemp(cUnit);
+ loadBaseIndexed(cUnit, rBase, rKey, rDisp, 2, kWord);
- // Load the displacement from the switch table
- int rDisp = oatAllocTemp(cUnit);
- loadBaseIndexed(cUnit, rBase, rKey, rDisp, 2, kWord);
+ // Add to r_AP and go
+ opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
+ opReg(cUnit, kOpBx, r_RA);
- // Add to r_AP and go
- opRegRegReg(cUnit, kOpAdd, r_RA, r_RA, rDisp);
- opReg(cUnit, kOpBx, r_RA);
-
- /* branchOver target here */
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
+ /* branchOver target here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
}
/*
@@ -227,60 +227,58 @@
*/
void genFillArrayData(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
- // Add the table to the list - we'll process it later
- FillArrayData *tabRec = (FillArrayData *)
- oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = mir->offset;
- u2 width = tabRec->table[1];
- u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
- tabRec->size = (size * width) + 8;
+ const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tabRec = (FillArrayData *)
+ oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = mir->offset;
+ u2 width = tabRec->table[1];
+ u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
+ tabRec->size = (size * width) + 8;
- oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
+ oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
- // Making a call - use explicit registers
- oatFlushAllRegs(cUnit); /* Everything to home location */
- oatLockCallTemps(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0);
+ // Making a call - use explicit registers
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ oatLockCallTemps(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0);
- // Must prevent code motion for the curr pc pair
- genBarrier(cUnit);
- newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot with the helper load
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
- genBarrier(cUnit); // Scheduling barrier
+ // Must prevent code motion for the curr pc pair
+ genBarrier(cUnit);
+ newLIR0(cUnit, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with the helper load
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ genBarrier(cUnit); // Scheduling barrier
- // Construct BaseLabel and set up table base register
- LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
+ // Construct BaseLabel and set up table base register
+ LIR* baseLabel = newLIR0(cUnit, kPseudoTargetLabel);
- // Materialize a pointer to the fill data image
- newLIR4(cUnit, kMipsDelta, rARG1, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
+ // Materialize a pointer to the fill data image
+ newLIR4(cUnit, kMipsDelta, rARG1, 0, (intptr_t)baseLabel, (intptr_t)tabRec);
- // And go...
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rTgt); // ( array*, fill_data* )
+ // And go...
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rTgt); // ( array*, fill_data* )
}
void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- RegLocation rlResult;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.lowReg,
- rlSrc.lowReg, 0x80000000);
- storeValue(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
+ storeValue(cUnit, rlDest, rlResult);
}
void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- RegLocation rlResult;
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg,
- 0x80000000);
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- storeValueWide(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ storeValueWide(cUnit, rlDest, rlResult);
}
/*
@@ -288,14 +286,14 @@
*/
void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - artLockObjectFromCode(self, obj);
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode));
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rTgt);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rTgt);
}
/*
@@ -303,14 +301,14 @@
*/
void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - UnlockObjectFromCode(obj);
- int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
- oatClobberCalleeSave(cUnit);
- opReg(cUnit, kOpBlx, rTgt);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ int rTgt = loadHelper(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ oatClobberCalleeSave(cUnit);
+ opReg(cUnit, kOpBlx, rTgt);
}
/*
@@ -330,190 +328,190 @@
*
*/
void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
- int t0 = oatAllocTemp(cUnit);
- int t1 = oatAllocTemp(cUnit);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- newLIR3(cUnit, kMipsSlt, t0, rlSrc1.highReg, rlSrc2.highReg);
- newLIR3(cUnit, kMipsSlt, t1, rlSrc2.highReg, rlSrc1.highReg);
- newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
- LIR* branch = opCmpImmBranch(cUnit, kCondNe, rlResult.lowReg, 0, NULL);
- newLIR3(cUnit, kMipsSltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
- newLIR3(cUnit, kMipsSltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
- newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
- oatFreeTemp(cUnit, t0);
- oatFreeTemp(cUnit, t1);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branch->target = (LIR*)target;
- storeValue(cUnit, rlDest, rlResult);
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
+ int t0 = oatAllocTemp(cUnit);
+ int t1 = oatAllocTemp(cUnit);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ newLIR3(cUnit, kMipsSlt, t0, rlSrc1.highReg, rlSrc2.highReg);
+ newLIR3(cUnit, kMipsSlt, t1, rlSrc2.highReg, rlSrc1.highReg);
+ newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+ LIR* branch = opCmpImmBranch(cUnit, kCondNe, rlResult.lowReg, 0, NULL);
+ newLIR3(cUnit, kMipsSltu, t0, rlSrc1.lowReg, rlSrc2.lowReg);
+ newLIR3(cUnit, kMipsSltu, t1, rlSrc2.lowReg, rlSrc1.lowReg);
+ newLIR3(cUnit, kMipsSubu, rlResult.lowReg, t1, t0);
+ oatFreeTemp(cUnit, t0);
+ oatFreeTemp(cUnit, t1);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branch->target = (LIR*)target;
+ storeValue(cUnit, rlDest, rlResult);
}
LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
- int src2, LIR* target)
+ int src2, LIR* target)
{
- LIR* branch;
- MipsOpCode sltOp;
- MipsOpCode brOp;
- bool cmpZero = false;
- bool swapped = false;
- switch (cond) {
- case kCondEq:
- brOp = kMipsBeq;
- cmpZero = true;
- break;
- case kCondNe:
- brOp = kMipsBne;
- cmpZero = true;
- break;
- case kCondCc:
- sltOp = kMipsSltu;
- brOp = kMipsBnez;
- break;
- case kCondCs:
- sltOp = kMipsSltu;
- brOp = kMipsBeqz;
- break;
- case kCondGe:
- sltOp = kMipsSlt;
- brOp = kMipsBeqz;
- break;
- case kCondGt:
- sltOp = kMipsSlt;
- brOp = kMipsBnez;
- swapped = true;
- break;
- case kCondLe:
- sltOp = kMipsSlt;
- brOp = kMipsBeqz;
- swapped = true;
- break;
- case kCondLt:
- sltOp = kMipsSlt;
- brOp = kMipsBnez;
- break;
- case kCondHi: // Gtu
- sltOp = kMipsSltu;
- brOp = kMipsBnez;
- swapped = true;
- break;
- default:
- LOG(FATAL) << "No support for ConditionCode: " << (int) cond;
- return NULL;
- }
- if (cmpZero) {
- branch = newLIR2(cUnit, brOp, src1, src2);
+ LIR* branch;
+ MipsOpCode sltOp;
+ MipsOpCode brOp;
+ bool cmpZero = false;
+ bool swapped = false;
+ switch (cond) {
+ case kCondEq:
+ brOp = kMipsBeq;
+ cmpZero = true;
+ break;
+ case kCondNe:
+ brOp = kMipsBne;
+ cmpZero = true;
+ break;
+ case kCondCc:
+ sltOp = kMipsSltu;
+ brOp = kMipsBnez;
+ break;
+ case kCondCs:
+ sltOp = kMipsSltu;
+ brOp = kMipsBeqz;
+ break;
+ case kCondGe:
+ sltOp = kMipsSlt;
+ brOp = kMipsBeqz;
+ break;
+ case kCondGt:
+ sltOp = kMipsSlt;
+ brOp = kMipsBnez;
+ swapped = true;
+ break;
+ case kCondLe:
+ sltOp = kMipsSlt;
+ brOp = kMipsBeqz;
+ swapped = true;
+ break;
+ case kCondLt:
+ sltOp = kMipsSlt;
+ brOp = kMipsBnez;
+ break;
+ case kCondHi: // Gtu
+ sltOp = kMipsSltu;
+ brOp = kMipsBnez;
+ swapped = true;
+ break;
+ default:
+ LOG(FATAL) << "No support for ConditionCode: " << (int) cond;
+ return NULL;
+ }
+ if (cmpZero) {
+ branch = newLIR2(cUnit, brOp, src1, src2);
+ } else {
+ int tReg = oatAllocTemp(cUnit);
+ if (swapped) {
+ newLIR3(cUnit, sltOp, tReg, src2, src1);
} else {
- int tReg = oatAllocTemp(cUnit);
- if (swapped) {
- newLIR3(cUnit, sltOp, tReg, src2, src1);
- } else {
- newLIR3(cUnit, sltOp, tReg, src1, src2);
- }
- branch = newLIR1(cUnit, brOp, tReg);
- oatFreeTemp(cUnit, tReg);
+ newLIR3(cUnit, sltOp, tReg, src1, src2);
}
- branch->target = target;
- return branch;
+ branch = newLIR1(cUnit, brOp, tReg);
+ oatFreeTemp(cUnit, tReg);
+ }
+ branch->target = target;
+ return branch;
}
LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+ int checkValue, LIR* target)
{
- LIR* branch;
- if (checkValue != 0) {
- // TUNING: handle s16 & kCondLt/Mi case using slti
- int tReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tReg, checkValue);
- branch = opCmpBranch(cUnit, cond, reg, tReg, target);
- oatFreeTemp(cUnit, tReg);
- return branch;
- }
- MipsOpCode opc;
- switch (cond) {
- case kCondEq: opc = kMipsBeqz; break;
- case kCondGe: opc = kMipsBgez; break;
- case kCondGt: opc = kMipsBgtz; break;
- case kCondLe: opc = kMipsBlez; break;
- //case KCondMi:
- case kCondLt: opc = kMipsBltz; break;
- case kCondNe: opc = kMipsBnez; break;
- default:
- // Tuning: use slti when applicable
- int tReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tReg, checkValue);
- branch = opCmpBranch(cUnit, cond, reg, tReg, target);
- oatFreeTemp(cUnit, tReg);
- return branch;
- }
- branch = newLIR1(cUnit, opc, reg);
- branch->target = target;
+ LIR* branch;
+ if (checkValue != 0) {
+ // TUNING: handle s16 & kCondLt/Mi case using slti
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, checkValue);
+ branch = opCmpBranch(cUnit, cond, reg, tReg, target);
+ oatFreeTemp(cUnit, tReg);
return branch;
+ }
+ MipsOpCode opc;
+ switch (cond) {
+ case kCondEq: opc = kMipsBeqz; break;
+ case kCondGe: opc = kMipsBgez; break;
+ case kCondGt: opc = kMipsBgtz; break;
+ case kCondLe: opc = kMipsBlez; break;
+ //case KCondMi:
+ case kCondLt: opc = kMipsBltz; break;
+ case kCondNe: opc = kMipsBnez; break;
+ default:
+ // Tuning: use slti when applicable
+ int tReg = oatAllocTemp(cUnit);
+ loadConstant(cUnit, tReg, checkValue);
+ branch = opCmpBranch(cUnit, cond, reg, tReg, target);
+ oatFreeTemp(cUnit, tReg);
+ return branch;
+ }
+ branch = newLIR1(cUnit, opc, reg);
+ branch->target = target;
+ return branch;
}
LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
{
#ifdef __mips_hard_float
- if (FPREG(rDest) || FPREG(rSrc))
- return fpRegCopy(cUnit, rDest, rSrc);
+ if (FPREG(rDest) || FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
#endif
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kMipsMove,
- rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kMipsMove,
+ rDest, rSrc);
+ if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
- oatAppendLIR(cUnit, (LIR*)res);
- return res;
+ LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, (LIR*)res);
+ return res;
}
void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+ int srcLo, int srcHi)
{
#ifdef __mips_hard_float
- bool destFP = FPREG(destLo) && FPREG(destHi);
- bool srcFP = FPREG(srcLo) && FPREG(srcHi);
- assert(FPREG(srcLo) == FPREG(srcHi));
- assert(FPREG(destLo) == FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
- } else {
- /* note the operands are swapped for the mtc1 instr */
- newLIR2(cUnit, kMipsMtc1, srcLo, destLo);
- newLIR2(cUnit, kMipsMtc1, srcHi, destHi);
- }
+ bool destFP = FPREG(destLo) && FPREG(destHi);
+ bool srcFP = FPREG(srcLo) && FPREG(srcHi);
+ assert(FPREG(srcLo) == FPREG(srcHi));
+ assert(FPREG(destLo) == FPREG(destHi));
+ if (destFP) {
+ if (srcFP) {
+ opRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
} else {
- if (srcFP) {
- newLIR2(cUnit, kMipsMfc1, destLo, srcLo);
- newLIR2(cUnit, kMipsMfc1, destHi, srcHi);
- } else {
- // Handle overlap
- if (srcHi == destLo) {
- opRegCopy(cUnit, destHi, srcHi);
- opRegCopy(cUnit, destLo, srcLo);
- } else {
- opRegCopy(cUnit, destLo, srcLo);
- opRegCopy(cUnit, destHi, srcHi);
- }
- }
+ /* note the operands are swapped for the mtc1 instr */
+ newLIR2(cUnit, kMipsMtc1, srcLo, destLo);
+ newLIR2(cUnit, kMipsMtc1, srcHi, destHi);
}
+ } else {
+ if (srcFP) {
+ newLIR2(cUnit, kMipsMfc1, destLo, srcLo);
+ newLIR2(cUnit, kMipsMfc1, destHi, srcHi);
+ } else {
+ // Handle overlap
+ if (srcHi == destLo) {
+ opRegCopy(cUnit, destHi, srcHi);
+ opRegCopy(cUnit, destLo, srcLo);
+ } else {
+ opRegCopy(cUnit, destLo, srcLo);
+ opRegCopy(cUnit, destHi, srcHi);
+ }
+ }
+ }
#else
- // Handle overlap
- if (srcHi == destLo) {
- opRegCopy(cUnit, destHi, srcHi);
- opRegCopy(cUnit, destLo, srcLo);
- } else {
- opRegCopy(cUnit, destLo, srcLo);
- opRegCopy(cUnit, destHi, srcHi);
- }
+ // Handle overlap
+ if (srcHi == destLo) {
+ opRegCopy(cUnit, destHi, srcHi);
+ opRegCopy(cUnit, destLo, srcLo);
+ } else {
+ opRegCopy(cUnit, destLo, srcLo);
+ opRegCopy(cUnit, destHi, srcHi);
+ }
#endif
}
diff --git a/src/compiler/codegen/mips/Mips32/Ralloc.cc b/src/compiler/codegen/mips/Mips32/Ralloc.cc
index f8440a4..9f39212 100644
--- a/src/compiler/codegen/mips/Mips32/Ralloc.cc
+++ b/src/compiler/codegen/mips/Mips32/Ralloc.cc
@@ -29,107 +29,107 @@
* high reg in next byte.
*/
int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass)
+ int regClass)
{
- int highReg;
- int lowReg;
- int res = 0;
+ int highReg;
+ int lowReg;
+ int res = 0;
#ifdef __mips_hard_float
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = oatAllocTempDouble(cUnit);
- highReg = lowReg + 1;
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
- return res;
- }
-#endif
-
- lowReg = oatAllocTemp(cUnit);
- highReg = oatAllocTemp(cUnit);
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+ lowReg = oatAllocTempDouble(cUnit);
+ highReg = lowReg + 1;
res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
return res;
+ }
+#endif
+
+ lowReg = oatAllocTemp(cUnit);
+ highReg = oatAllocTemp(cUnit);
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
}
int oatAllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass)
{
#ifdef __mips_hard_float
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
{
- return oatAllocTempFloat(cUnit);
+ return oatAllocTempFloat(cUnit);
}
#endif
- return oatAllocTemp(cUnit);
+ return oatAllocTemp(cUnit);
}
void oatInitializeRegAlloc(CompilationUnit* cUnit)
{
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
+ int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
+ int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
+ int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
#ifdef __mips_hard_float
- int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
+ int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
#else
- int numFPRegs = 0;
- int numFPTemps = 0;
+ int numFPRegs = 0;
+ int numFPTemps = 0;
#endif
- RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
- kAllocRegAlloc);
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = (RegisterInfo *)
- oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
- true, kAllocRegAlloc);
- pool->numFPRegs = numFPRegs;
- pool->FPRegs = (RegisterInfo *)
- oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
- kAllocRegAlloc);
- oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
- // Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
- if (NO_SUSPEND && !cUnit->genDebugger &&
- (reservedRegs[i] == rSUSPEND)) {
- //To measure cost of suspend check
- continue;
- }
- oatMarkInUse(cUnit, reservedRegs[i]);
+ RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
+ kAllocRegAlloc);
+ cUnit->regPool = pool;
+ pool->numCoreRegs = numRegs;
+ pool->coreRegs = (RegisterInfo *)
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
+ true, kAllocRegAlloc);
+ pool->numFPRegs = numFPRegs;
+ pool->FPRegs = (RegisterInfo *)
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
+ oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
+ oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < numReserved; i++) {
+ if (NO_SUSPEND && !cUnit->genDebugger &&
+ (reservedRegs[i] == rSUSPEND)) {
+ //To measure cost of suspend check
+ continue;
}
- // Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- oatMarkTemp(cUnit, coreTemps[i]);
+ oatMarkInUse(cUnit, reservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < numTemps; i++) {
+ oatMarkTemp(cUnit, coreTemps[i]);
+ }
+ for (int i = 0; i < numFPTemps; i++) {
+ oatMarkTemp(cUnit, fpTemps[i]);
+ }
+ // Construct the alias map.
+ cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
+ sizeof(cUnit->phiAliasMap[0]), false,
+ kAllocDFInfo);
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ cUnit->phiAliasMap[i] = i;
+ }
+ for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
+ int defReg = phi->ssaRep->defs[0];
+ for (int i = 0; i < phi->ssaRep->numUses; i++) {
+ for (int j = 0; j < cUnit->numSSARegs; j++) {
+ if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
+ cUnit->phiAliasMap[j] = defReg;
+ }
+ }
}
- for (int i = 0; i < numFPTemps; i++) {
- oatMarkTemp(cUnit, fpTemps[i]);
- }
- // Construct the alias map.
- cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
- sizeof(cUnit->phiAliasMap[0]), false,
- kAllocDFInfo);
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
- }
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
- }
- }
- }
- }
+ }
}
void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
+ RegLocation rlFree)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
- // No overlap, free both
- oatFreeTemp(cUnit, rlFree.lowReg);
- oatFreeTemp(cUnit, rlFree.highReg);
- }
+ if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
+ (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ // No overlap, free both
+ oatFreeTemp(cUnit, rlFree.lowReg);
+ oatFreeTemp(cUnit, rlFree.highReg);
+ }
}
diff --git a/src/compiler/codegen/mips/MipsLIR.h b/src/compiler/codegen/mips/MipsLIR.h
index c0ff298..ac222c4 100644
--- a/src/compiler/codegen/mips/MipsLIR.h
+++ b/src/compiler/codegen/mips/MipsLIR.h
@@ -146,33 +146,33 @@
/* RegisterLocation templates return values (r_V0, or r_V0/r_V1) */
#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
- INVALID_SREG}
+ INVALID_SREG}
#define LOC_C_RETURN_FLOAT LOC_C_RETURN
#define LOC_C_RETURN_ALT {kLocPhysReg, 0, 0, 0, 0, 0, 1, r_F0, INVALID_REG, \
- INVALID_SREG}
+ INVALID_SREG}
#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 1, r_RESULT0, r_RESULT1,\
- INVALID_SREG}
+ INVALID_SREG}
#define LOC_C_RETURN_WIDE_DOUBLE LOC_C_RETURN_WIDE
#define LOC_C_RETURN_WIDE_ALT {kLocPhysReg, 1, 0, 0, 0, 0, 1, r_FRESULT0,\
- r_FRESULT1, INVALID_SREG}
+ r_FRESULT1, INVALID_SREG}
enum ResourceEncodingPos {
- kGPReg0 = 0,
- kRegSP = 29,
- kRegLR = 31,
- kFPReg0 = 32, /* only 16 fp regs supported currently */
- kFPRegEnd = 48,
- kRegHI = kFPRegEnd,
- kRegLO,
- kRegPC,
- kRegEnd = 51,
- kCCode = kRegEnd,
- kFPStatus, // FP status word
- // The following four bits are for memory disambiguation
- kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
- kLiteral, // 2 Literal pool (can be fully disambiguated)
- kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
- kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
+ kGPReg0 = 0,
+ kRegSP = 29,
+ kRegLR = 31,
+ kFPReg0 = 32, /* only 16 fp regs supported currently */
+ kFPRegEnd = 48,
+ kRegHI = kFPRegEnd,
+ kRegLO,
+ kRegPC,
+ kRegEnd = 51,
+ kCCode = kRegEnd,
+ kFPStatus, // FP status word
+ // The following four bits are for memory disambiguation
+ kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
+ kLiteral, // 2 Literal pool (can be fully disambiguated)
+ kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
+ kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
};
#define ENCODE_REG_LIST(N) ((u8) N)
@@ -190,7 +190,7 @@
#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
- ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+ ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & 0x80000000) ? 1 : 0)
@@ -200,94 +200,94 @@
*/
enum NativeRegisterPool {
- r_ZERO = 0,
- r_AT = 1,
- r_V0 = 2,
- r_V1 = 3,
- r_A0 = 4,
- r_A1 = 5,
- r_A2 = 6,
- r_A3 = 7,
- r_T0 = 8,
- r_T1 = 9,
- r_T2 = 10,
- r_T3 = 11,
- r_T4 = 12,
- r_T5 = 13,
- r_T6 = 14,
- r_T7 = 15,
- r_S0 = 16,
- r_S1 = 17,
- r_S2 = 18,
- r_S3 = 19,
- r_S4 = 20,
- r_S5 = 21,
- r_S6 = 22,
- r_S7 = 23,
- r_T8 = 24,
- r_T9 = 25,
- r_K0 = 26,
- r_K1 = 27,
- r_GP = 28,
- r_SP = 29,
- r_FP = 30,
- r_RA = 31,
+ r_ZERO = 0,
+ r_AT = 1,
+ r_V0 = 2,
+ r_V1 = 3,
+ r_A0 = 4,
+ r_A1 = 5,
+ r_A2 = 6,
+ r_A3 = 7,
+ r_T0 = 8,
+ r_T1 = 9,
+ r_T2 = 10,
+ r_T3 = 11,
+ r_T4 = 12,
+ r_T5 = 13,
+ r_T6 = 14,
+ r_T7 = 15,
+ r_S0 = 16,
+ r_S1 = 17,
+ r_S2 = 18,
+ r_S3 = 19,
+ r_S4 = 20,
+ r_S5 = 21,
+ r_S6 = 22,
+ r_S7 = 23,
+ r_T8 = 24,
+ r_T9 = 25,
+ r_K0 = 26,
+ r_K1 = 27,
+ r_GP = 28,
+ r_SP = 29,
+ r_FP = 30,
+ r_RA = 31,
- r_F0 = 0 + FP_REG_OFFSET,
- r_F1,
- r_F2,
- r_F3,
- r_F4,
- r_F5,
- r_F6,
- r_F7,
- r_F8,
- r_F9,
- r_F10,
- r_F11,
- r_F12,
- r_F13,
- r_F14,
- r_F15,
+ r_F0 = 0 + FP_REG_OFFSET,
+ r_F1,
+ r_F2,
+ r_F3,
+ r_F4,
+ r_F5,
+ r_F6,
+ r_F7,
+ r_F8,
+ r_F9,
+ r_F10,
+ r_F11,
+ r_F12,
+ r_F13,
+ r_F14,
+ r_F15,
#if 0 /* only 16 fp regs supported currently */
- r_F16,
- r_F17,
- r_F18,
- r_F19,
- r_F20,
- r_F21,
- r_F22,
- r_F23,
- r_F24,
- r_F25,
- r_F26,
- r_F27,
- r_F28,
- r_F29,
- r_F30,
- r_F31,
+ r_F16,
+ r_F17,
+ r_F18,
+ r_F19,
+ r_F20,
+ r_F21,
+ r_F22,
+ r_F23,
+ r_F24,
+ r_F25,
+ r_F26,
+ r_F27,
+ r_F28,
+ r_F29,
+ r_F30,
+ r_F31,
#endif
- r_DF0 = r_F0 + FP_DOUBLE,
- r_DF1 = r_F2 + FP_DOUBLE,
- r_DF2 = r_F4 + FP_DOUBLE,
- r_DF3 = r_F6 + FP_DOUBLE,
- r_DF4 = r_F8 + FP_DOUBLE,
- r_DF5 = r_F10 + FP_DOUBLE,
- r_DF6 = r_F12 + FP_DOUBLE,
- r_DF7 = r_F14 + FP_DOUBLE,
+ r_DF0 = r_F0 + FP_DOUBLE,
+ r_DF1 = r_F2 + FP_DOUBLE,
+ r_DF2 = r_F4 + FP_DOUBLE,
+ r_DF3 = r_F6 + FP_DOUBLE,
+ r_DF4 = r_F8 + FP_DOUBLE,
+ r_DF5 = r_F10 + FP_DOUBLE,
+ r_DF6 = r_F12 + FP_DOUBLE,
+ r_DF7 = r_F14 + FP_DOUBLE,
#if 0 /* only 16 fp regs supported currently */
- r_DF8 = r_F16 + FP_DOUBLE,
- r_DF9 = r_F18 + FP_DOUBLE,
- r_DF10 = r_F20 + FP_DOUBLE,
- r_DF11 = r_F22 + FP_DOUBLE,
- r_DF12 = r_F24 + FP_DOUBLE,
- r_DF13 = r_F26 + FP_DOUBLE,
- r_DF14 = r_F28 + FP_DOUBLE,
- r_DF15 = r_F30 + FP_DOUBLE,
+ r_DF8 = r_F16 + FP_DOUBLE,
+ r_DF9 = r_F18 + FP_DOUBLE,
+ r_DF10 = r_F20 + FP_DOUBLE,
+ r_DF11 = r_F22 + FP_DOUBLE,
+ r_DF12 = r_F24 + FP_DOUBLE,
+ r_DF13 = r_F26 + FP_DOUBLE,
+ r_DF14 = r_F28 + FP_DOUBLE,
+ r_DF15 = r_F30 + FP_DOUBLE,
#endif
- r_HI = EXTRA_REG_OFFSET,
- r_LO,
- r_PC,
+ r_HI = EXTRA_REG_OFFSET,
+ r_LO,
+ r_PC,
};
/*
@@ -307,10 +307,10 @@
/* Shift encodings */
enum MipsShiftEncodings {
- kMipsLsl = 0x0,
- kMipsLsr = 0x1,
- kMipsAsr = 0x2,
- kMipsRor = 0x3
+ kMipsLsl = 0x0,
+ kMipsLsr = 0x1,
+ kMipsAsr = 0x2,
+ kMipsRor = 0x3
};
// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist)
@@ -333,148 +333,148 @@
* Assemble.cc.
*/
enum MipsOpCode {
- kPseudoIntrinsicRetry = -16,
- kPseudoSuspendTarget = -15,
- kPseudoThrowTarget = -14,
- kPseudoCaseLabel = -13,
- kPseudoMethodEntry = -12,
- kPseudoMethodExit = -11,
- kPseudoBarrier = -10,
- kPseudoExtended = -9,
- kPseudoSSARep = -8,
- kPseudoEntryBlock = -7,
- kPseudoExitBlock = -6,
- kPseudoTargetLabel = -5,
- kPseudoDalvikByteCodeBoundary = -4,
- kPseudoPseudoAlign4 = -3,
- kPseudoEHBlockLabel = -2,
- kPseudoNormalBlockLabel = -1,
+ kPseudoIntrinsicRetry = -16,
+ kPseudoSuspendTarget = -15,
+ kPseudoThrowTarget = -14,
+ kPseudoCaseLabel = -13,
+ kPseudoMethodEntry = -12,
+ kPseudoMethodExit = -11,
+ kPseudoBarrier = -10,
+ kPseudoExtended = -9,
+ kPseudoSSARep = -8,
+ kPseudoEntryBlock = -7,
+ kPseudoExitBlock = -6,
+ kPseudoTargetLabel = -5,
+ kPseudoDalvikByteCodeBoundary = -4,
+ kPseudoPseudoAlign4 = -3,
+ kPseudoEHBlockLabel = -2,
+ kPseudoNormalBlockLabel = -1,
- kMipsFirst,
- kMips32BitData = kMipsFirst, /* data [31..0] */
- kMipsAddiu, /* addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
- kMipsAddu, /* add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001] */
- kMipsAnd, /* and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100] */
- kMipsAndi, /* andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0] */
- kMipsB, /* b o [0001000000000000] o[15..0] */
- kMipsBal, /* bal o [0000010000010001] o[15..0] */
- /* NOTE: the code tests the range kMipsBeq thru kMipsBne, so
- adding an instruction in this range may require updates */
- kMipsBeq, /* beq s,t,o [000100] s[25..21] t[20..16] o[15..0] */
- kMipsBeqz, /* beqz s,o [000100] s[25..21] [00000] o[15..0] */
- kMipsBgez, /* bgez s,o [000001] s[25..21] [00001] o[15..0] */
- kMipsBgtz, /* bgtz s,o [000111] s[25..21] [00000] o[15..0] */
- kMipsBlez, /* blez s,o [000110] s[25..21] [00000] o[15..0] */
- kMipsBltz, /* bltz s,o [000001] s[25..21] [00000] o[15..0] */
- kMipsBnez, /* bnez s,o [000101] s[25..21] [00000] o[15..0] */
- kMipsBne, /* bne s,t,o [000101] s[25..21] t[20..16] o[15..0] */
- kMipsDiv, /* div s,t [000000] s[25..21] t[20..16] [0000000000011010] */
+ kMipsFirst,
+ kMips32BitData = kMipsFirst, /* data [31..0] */
+ kMipsAddiu, /* addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
+ kMipsAddu, /* add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001] */
+ kMipsAnd, /* and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100] */
+ kMipsAndi, /* andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0] */
+ kMipsB, /* b o [0001000000000000] o[15..0] */
+ kMipsBal, /* bal o [0000010000010001] o[15..0] */
+ /* NOTE: the code tests the range kMipsBeq thru kMipsBne, so
+ adding an instruction in this range may require updates */
+ kMipsBeq, /* beq s,t,o [000100] s[25..21] t[20..16] o[15..0] */
+ kMipsBeqz, /* beqz s,o [000100] s[25..21] [00000] o[15..0] */
+ kMipsBgez, /* bgez s,o [000001] s[25..21] [00001] o[15..0] */
+ kMipsBgtz, /* bgtz s,o [000111] s[25..21] [00000] o[15..0] */
+ kMipsBlez, /* blez s,o [000110] s[25..21] [00000] o[15..0] */
+ kMipsBltz, /* bltz s,o [000001] s[25..21] [00000] o[15..0] */
+ kMipsBnez, /* bnez s,o [000101] s[25..21] [00000] o[15..0] */
+ kMipsBne, /* bne s,t,o [000101] s[25..21] t[20..16] o[15..0] */
+ kMipsDiv, /* div s,t [000000] s[25..21] t[20..16] [0000000000011010] */
#if __mips_isa_rev>=2
- kMipsExt, /* ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000] */
+ kMipsExt, /* ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000] */
#endif
- kMipsJal, /* jal t [000011] t[25..0] */
- kMipsJalr, /* jalr d,s [000000] s[25..21] [00000] d[15..11]
- hint[10..6] [001001] */
- kMipsJr, /* jr s [000000] s[25..21] [0000000000] hint[10..6] [001000] */
- kMipsLahi, /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi */
- kMipsLalo, /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo */
- kMipsLui, /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] */
- kMipsLb, /* lb t,o(b) [100000] b[25..21] t[20..16] o[15..0] */
- kMipsLbu, /* lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0] */
- kMipsLh, /* lh t,o(b) [100001] b[25..21] t[20..16] o[15..0] */
- kMipsLhu, /* lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0] */
- kMipsLw, /* lw t,o(b) [100011] b[25..21] t[20..16] o[15..0] */
- kMipsMfhi, /* mfhi d [0000000000000000] d[15..11] [00000010000] */
- kMipsMflo, /* mflo d [0000000000000000] d[15..11] [00000010010] */
- kMipsMove, /* move d,s [000000] s[25..21] [00000] d[15..11] [00000100101] */
- kMipsMovz, /* movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010] */
- kMipsMul, /* mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010] */
- kMipsNop, /* nop [00000000000000000000000000000000] */
- kMipsNor, /* nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111] */
- kMipsOr, /* or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101] */
- kMipsOri, /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
- kMipsPref, /* pref h,o(b) [101011] b[25..21] h[20..16] o[15..0] */
- kMipsSb, /* sb t,o(b) [101000] b[25..21] t[20..16] o[15..0] */
+ kMipsJal, /* jal t [000011] t[25..0] */
+ kMipsJalr, /* jalr d,s [000000] s[25..21] [00000] d[15..11]
+ hint[10..6] [001001] */
+ kMipsJr, /* jr s [000000] s[25..21] [0000000000] hint[10..6] [001000] */
+ kMipsLahi, /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi */
+ kMipsLalo, /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo */
+ kMipsLui, /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] */
+ kMipsLb, /* lb t,o(b) [100000] b[25..21] t[20..16] o[15..0] */
+ kMipsLbu, /* lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0] */
+ kMipsLh, /* lh t,o(b) [100001] b[25..21] t[20..16] o[15..0] */
+ kMipsLhu, /* lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0] */
+ kMipsLw, /* lw t,o(b) [100011] b[25..21] t[20..16] o[15..0] */
+ kMipsMfhi, /* mfhi d [0000000000000000] d[15..11] [00000010000] */
+ kMipsMflo, /* mflo d [0000000000000000] d[15..11] [00000010010] */
+ kMipsMove, /* move d,s [000000] s[25..21] [00000] d[15..11] [00000100101] */
+ kMipsMovz, /* movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010] */
+ kMipsMul, /* mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010] */
+ kMipsNop, /* nop [00000000000000000000000000000000] */
+ kMipsNor, /* nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111] */
+ kMipsOr, /* or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101] */
+ kMipsOri, /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
+ kMipsPref, /* pref h,o(b) [101011] b[25..21] h[20..16] o[15..0] */
+ kMipsSb, /* sb t,o(b) [101000] b[25..21] t[20..16] o[15..0] */
#if __mips_isa_rev>=2
- kMipsSeb, /* seb d,t [01111100000] t[20..16] d[15..11] [10000100000] */
- kMipsSeh, /* seh d,t [01111100000] t[20..16] d[15..11] [11000100000] */
+ kMipsSeb, /* seb d,t [01111100000] t[20..16] d[15..11] [10000100000] */
+ kMipsSeh, /* seh d,t [01111100000] t[20..16] d[15..11] [11000100000] */
#endif
- kMipsSh, /* sh t,o(b) [101001] b[25..21] t[20..16] o[15..0] */
- kMipsSll, /* sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000] */
- kMipsSllv, /* sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100] */
- kMipsSlt, /* slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010] */
- kMipsSlti, /* slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0] */
- kMipsSltu, /* sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011] */
- kMipsSra, /* sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011] */
- kMipsSrav, /* srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111] */
- kMipsSrl, /* srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010] */
- kMipsSrlv, /* srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110] */
- kMipsSubu, /* subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011] */
- kMipsSw, /* sw t,o(b) [101011] b[25..21] t[20..16] o[15..0] */
- kMipsXor, /* xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110] */
- kMipsXori, /* xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0] */
+ kMipsSh, /* sh t,o(b) [101001] b[25..21] t[20..16] o[15..0] */
+ kMipsSll, /* sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000] */
+ kMipsSllv, /* sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100] */
+ kMipsSlt, /* slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010] */
+ kMipsSlti, /* slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0] */
+ kMipsSltu, /* sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011] */
+ kMipsSra, /* sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011] */
+ kMipsSrav, /* srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111] */
+ kMipsSrl, /* srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010] */
+ kMipsSrlv, /* srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110] */
+ kMipsSubu, /* subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011] */
+ kMipsSw, /* sw t,o(b) [101011] b[25..21] t[20..16] o[15..0] */
+ kMipsXor, /* xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110] */
+ kMipsXori, /* xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0] */
#ifdef __mips_hard_float
- kMipsFadds, /* add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000] */
- kMipsFsubs, /* sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001] */
- kMipsFmuls, /* mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010] */
- kMipsFdivs, /* div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011] */
- kMipsFaddd, /* add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000] */
- kMipsFsubd, /* sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001] */
- kMipsFmuld, /* mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010] */
- kMipsFdivd, /* div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011] */
- kMipsFcvtsd, /* cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000] */
- kMipsFcvtsw, /* cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000] */
- kMipsFcvtds, /* cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001] */
- kMipsFcvtdw, /* cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001] */
- kMipsFcvtws, /* cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100] */
- kMipsFcvtwd, /* cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100] */
- kMipsFmovs, /* mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110] */
- kMipsFmovd, /* mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110] */
- kMipsFlwc1, /* lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0] */
- kMipsFldc1, /* ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0] */
- kMipsFswc1, /* swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0] */
- kMipsFsdc1, /* sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0] */
- kMipsMfc1, /* mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000] */
- kMipsMtc1, /* mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000] */
+ kMipsFadds, /* add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000] */
+ kMipsFsubs, /* sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001] */
+ kMipsFmuls, /* mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010] */
+ kMipsFdivs, /* div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011] */
+ kMipsFaddd, /* add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000] */
+ kMipsFsubd, /* sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001] */
+ kMipsFmuld, /* mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010] */
+ kMipsFdivd, /* div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011] */
+ kMipsFcvtsd,/* cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000] */
+ kMipsFcvtsw,/* cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000] */
+ kMipsFcvtds,/* cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001] */
+ kMipsFcvtdw,/* cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001] */
+ kMipsFcvtws,/* cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100] */
+ kMipsFcvtwd,/* cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100] */
+ kMipsFmovs, /* mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110] */
+ kMipsFmovd, /* mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110] */
+ kMipsFlwc1, /* lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0] */
+ kMipsFldc1, /* ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0] */
+ kMipsFswc1, /* swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0] */
+ kMipsFsdc1, /* sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0] */
+ kMipsMfc1, /* mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000] */
+ kMipsMtc1, /* mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000] */
#endif
- kMipsDelta, /* Psuedo for ori t, s, <label>-<label> */
- kMipsDeltaHi, /* Pseudo for lui t, high16(<label>-<label>) */
- kMipsDeltaLo, /* Pseudo for ori t, s, low16(<label>-<label>) */
- kMipsCurrPC, /* jal to .+8 to materialize pc */
- kMipsSync, /* sync kind [000000] [0000000000000000] s[10..6] [001111] */
- kMipsUndefined, /* undefined [011001xxxxxxxxxxxxxxxx] */
- kMipsLast
+ kMipsDelta, /* Pseudo for ori t, s, <label>-<label> */
+ kMipsDeltaHi, /* Pseudo for lui t, high16(<label>-<label>) */
+ kMipsDeltaLo, /* Pseudo for ori t, s, low16(<label>-<label>) */
+ kMipsCurrPC, /* jal to .+8 to materialize pc */
+ kMipsSync, /* sync kind [000000] [0000000000000000] s[10..6] [001111] */
+ kMipsUndefined, /* undefined [011001xxxxxxxxxxxxxxxx] */
+ kMipsLast
};
/* Bit flags describing the behavior of each native opcode */
enum MipsOpFeatureFlags {
- kIsBranch = 0,
- kRegDef0,
- kRegDef1,
- kRegDefSP,
- kRegDefLR,
- kRegDefList0,
- kRegDefList1,
- kRegUse0,
- kRegUse1,
- kRegUse2,
- kRegUse3,
- kRegUseSP,
- kRegUsePC,
- kRegUseList0,
- kRegUseList1,
- kNoOperand,
- kIsUnaryOp,
- kIsBinaryOp,
- kIsTertiaryOp,
- kIsQuadOp,
- kIsIT,
- kSetsCCodes,
- kUsesCCodes,
- kMemLoad,
- kMemStore,
- kPCRelFixup,
- kRegUseLR,
+ kIsBranch = 0,
+ kRegDef0,
+ kRegDef1,
+ kRegDefSP,
+ kRegDefLR,
+ kRegDefList0,
+ kRegDefList1,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUseSP,
+ kRegUsePC,
+ kRegUseList0,
+ kRegUseList1,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsIT,
+ kSetsCCodes,
+ kUsesCCodes,
+ kMemLoad,
+ kMemStore,
+ kPCRelFixup,
+ kRegUseLR,
};
#define IS_LOAD (1 << kMemLoad)
@@ -527,36 +527,35 @@
/* Instruction assembly fieldLoc kind */
enum MipsEncodingKind {
- kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+ kFmtUnused,
+ kFmtBitBlt, /* Bit string using end/start */
+ kFmtDfp, /* Double FP reg */
+ kFmtSfp, /* Single FP reg */
+ kFmtBlt5_2, /* Same 5-bit field to 2 locations */
};
/* Struct used to define the snippet positions for each MIPS opcode */
struct MipsEncodingMap {
- u4 skeleton;
- struct {
- MipsEncodingKind kind;
- int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
- int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
- } fieldLoc[4];
- MipsOpCode opcode;
- int flags;
- const char *name;
- const char* fmt;
- int size; /* Size in bytes */
+ u4 skeleton;
+ struct {
+ MipsEncodingKind kind;
+ int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
+ int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
+ } fieldLoc[4];
+ MipsOpCode opcode;
+ int flags;
+ const char *name;
+ const char* fmt;
+ int size; /* Size in bytes */
};
/* Keys for target-specific scheduling and other optimization hints */
enum MipsTargetOptHints {
- kMaxHoistDistance,
+ kMaxHoistDistance,
};
extern MipsEncodingMap EncodingMap[kMipsLast];
-
#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) /* 2 offsets must fit */
diff --git a/src/compiler/codegen/mips/MipsRallocUtil.cc b/src/compiler/codegen/mips/MipsRallocUtil.cc
index 7ed3f86..ded59f0 100644
--- a/src/compiler/codegen/mips/MipsRallocUtil.cc
+++ b/src/compiler/codegen/mips/MipsRallocUtil.cc
@@ -35,8 +35,8 @@
void oatAdjustSpillMask(CompilationUnit* cUnit)
{
- cUnit->coreSpillMask |= (1 << r_RA);
- cUnit->numCoreSpills++;
+ cUnit->coreSpillMask |= (1 << r_RA);
+ cUnit->numCoreSpills++;
}
/*
@@ -47,145 +47,141 @@
*/
void oatMarkPreservedSingle(CompilationUnit* cUnit, int sReg, int reg)
{
- LOG(FATAL) << "No support yet for promoted FP regs";
+ LOG(FATAL) << "No support yet for promoted FP regs";
}
void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
{
- RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
- DCHECK(info1 && info2 && info1->pair && info2->pair &&
- (info1->partner == info2->reg) &&
- (info2->partner == info1->reg));
- if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
- LOG(FATAL) << "Long half-temp, half-promoted";
- }
-
- info1->dirty = false;
- info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
- info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- oatFlushRegWideImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- info1->reg, info1->partner);
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
}
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = SRegToVReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg), info1->reg,
+ info1->partner);
+ }
}
void oatFlushReg(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if (info->live && info->dirty) {
- info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- oatFlushRegImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- reg, kWord);
- }
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = SRegToVReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg), reg, kWord);
+ }
}
/* Give access to the target-dependent FP register encoding to common code */
bool oatIsFpReg(int reg) {
- return FPREG(reg);
+ return FPREG(reg);
}
uint32_t oatFpRegMask() {
- return FP_REG_MASK;
+ return FP_REG_MASK;
}
/* Clobber all regs that might be used by an external C call */
extern void oatClobberCalleeSave(CompilationUnit *cUnit)
{
- oatClobber(cUnit, r_ZERO);
- oatClobber(cUnit, r_AT);
- oatClobber(cUnit, r_V0);
- oatClobber(cUnit, r_V1);
- oatClobber(cUnit, r_A0);
- oatClobber(cUnit, r_A1);
- oatClobber(cUnit, r_A2);
- oatClobber(cUnit, r_A3);
- oatClobber(cUnit, r_T0);
- oatClobber(cUnit, r_T1);
- oatClobber(cUnit, r_T2);
- oatClobber(cUnit, r_T3);
- oatClobber(cUnit, r_T4);
- oatClobber(cUnit, r_T5);
- oatClobber(cUnit, r_T6);
- oatClobber(cUnit, r_T7);
- oatClobber(cUnit, r_T8);
- oatClobber(cUnit, r_T9);
- oatClobber(cUnit, r_K0);
- oatClobber(cUnit, r_K1);
- oatClobber(cUnit, r_GP);
- oatClobber(cUnit, r_FP);
- oatClobber(cUnit, r_RA);
- oatClobber(cUnit, r_F0);
- oatClobber(cUnit, r_F1);
- oatClobber(cUnit, r_F2);
- oatClobber(cUnit, r_F3);
- oatClobber(cUnit, r_F4);
- oatClobber(cUnit, r_F5);
- oatClobber(cUnit, r_F6);
- oatClobber(cUnit, r_F7);
- oatClobber(cUnit, r_F8);
- oatClobber(cUnit, r_F9);
- oatClobber(cUnit, r_F10);
- oatClobber(cUnit, r_F11);
- oatClobber(cUnit, r_F12);
- oatClobber(cUnit, r_F13);
- oatClobber(cUnit, r_F14);
- oatClobber(cUnit, r_F15);
+ oatClobber(cUnit, r_ZERO);
+ oatClobber(cUnit, r_AT);
+ oatClobber(cUnit, r_V0);
+ oatClobber(cUnit, r_V1);
+ oatClobber(cUnit, r_A0);
+ oatClobber(cUnit, r_A1);
+ oatClobber(cUnit, r_A2);
+ oatClobber(cUnit, r_A3);
+ oatClobber(cUnit, r_T0);
+ oatClobber(cUnit, r_T1);
+ oatClobber(cUnit, r_T2);
+ oatClobber(cUnit, r_T3);
+ oatClobber(cUnit, r_T4);
+ oatClobber(cUnit, r_T5);
+ oatClobber(cUnit, r_T6);
+ oatClobber(cUnit, r_T7);
+ oatClobber(cUnit, r_T8);
+ oatClobber(cUnit, r_T9);
+ oatClobber(cUnit, r_K0);
+ oatClobber(cUnit, r_K1);
+ oatClobber(cUnit, r_GP);
+ oatClobber(cUnit, r_FP);
+ oatClobber(cUnit, r_RA);
+ oatClobber(cUnit, r_F0);
+ oatClobber(cUnit, r_F1);
+ oatClobber(cUnit, r_F2);
+ oatClobber(cUnit, r_F3);
+ oatClobber(cUnit, r_F4);
+ oatClobber(cUnit, r_F5);
+ oatClobber(cUnit, r_F6);
+ oatClobber(cUnit, r_F7);
+ oatClobber(cUnit, r_F8);
+ oatClobber(cUnit, r_F9);
+ oatClobber(cUnit, r_F10);
+ oatClobber(cUnit, r_F11);
+ oatClobber(cUnit, r_F12);
+ oatClobber(cUnit, r_F13);
+ oatClobber(cUnit, r_F14);
+ oatClobber(cUnit, r_F15);
}
extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit)
{
- RegLocation res = LOC_C_RETURN_WIDE_ALT;
- oatClobber(cUnit, res.lowReg);
- oatClobber(cUnit, res.highReg);
- oatMarkInUse(cUnit, res.lowReg);
- oatMarkInUse(cUnit, res.highReg);
- oatMarkPair(cUnit, res.lowReg, res.highReg);
- return res;
+ RegLocation res = LOC_C_RETURN_WIDE_ALT;
+ oatClobber(cUnit, res.lowReg);
+ oatClobber(cUnit, res.highReg);
+ oatMarkInUse(cUnit, res.lowReg);
+ oatMarkInUse(cUnit, res.highReg);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
}
extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
{
- UNIMPLEMENTED(FATAL);
- RegLocation res = LOC_C_RETURN;
- return res;
+ UNIMPLEMENTED(FATAL);
+ RegLocation res = LOC_C_RETURN;
+ return res;
}
extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
{
- return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
}
/* To be used when explicitly managing register use */
extern void oatLockCallTemps(CompilationUnit* cUnit)
{
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
- oatLockTemp(cUnit, rARG3);
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
}
/* To be used when explicitly managing register use */
extern void oatFreeCallTemps(CompilationUnit* cUnit)
{
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
- oatFreeTemp(cUnit, rARG3);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
}
/* Convert an instruction to a NOP */
void oatNopLIR( LIR* lir)
{
- ((LIR*)lir)->flags.isNop = true;
+ ((LIR*)lir)->flags.isNop = true;
}
} // namespace art
diff --git a/src/compiler/codegen/mips/mips/ArchVariant.cc b/src/compiler/codegen/mips/mips/ArchVariant.cc
index 6b04d70..3018ffe 100644
--- a/src/compiler/codegen/mips/mips/ArchVariant.cc
+++ b/src/compiler/codegen/mips/mips/ArchVariant.cc
@@ -27,32 +27,32 @@
*/
InstructionSet oatInstructionSet()
{
- return kMips;
+ return kMips;
}
/* Architecture-specific initializations and checks go here */
bool oatArchVariantInit(void)
{
- return true;
+ return true;
}
int dvmCompilerTargetOptHint(int key)
{
- int res;
- switch (key) {
- case kMaxHoistDistance:
- res = 2;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
- }
- return res;
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
}
void oatGenMemBarrier(CompilationUnit *cUnit, int barrierKind)
{
#if ANDROID_SMP != 0
- newLIR1(cUnit, kMipsSync, barrierKind);
+ newLIR1(cUnit, kMipsSync, barrierKind);
#endif
}
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
index 043d66e..dc13238 100644
--- a/src/compiler/codegen/x86/ArchFactory.cc
+++ b/src/compiler/codegen/x86/ArchFactory.cc
@@ -70,7 +70,7 @@
}
bool genOrLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
oatFlushAllRegs(cUnit);
oatLockCallTemps(cUnit); // Prepare for explicit register usage
@@ -149,65 +149,65 @@
void opRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
- case kOpCmp: opcode = kX86Cmp32RT; break;
- default:
- LOG(FATAL) << "Bad opcode: " << op;
- break;
+ case kOpCmp: opcode = kX86Cmp32RT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
}
newLIR2(cUnit, opcode, rDest, threadOffset);
}
void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- /*
- * On entry, rARG0, rARG1, rARG2 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with no spare temps.
- */
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
+ /*
+ * On entry, rARG0, rARG1, rARG2 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with no spare temps.
+ */
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
- /* Build frame, return address already on stack */
- opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - 4);
+ /* Build frame, return address already on stack */
+ opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - 4);
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- ((size_t)cUnit->frameSize <
- Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kPseudoMethodEntry);
- /* Spill core callee saves */
- spillCoreRegs(cUnit);
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
- // cmp rSP, fs:[stack_end_]; jcc throw_launchpad
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
- opRegThreadMem(cUnit, kOpCmp, rSP, Thread::StackEndOffset().Int32Value());
- opCondBranch(cUnit, kCondUlt, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- }
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize <
+ Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ /* Spill core callee saves */
+ spillCoreRegs(cUnit);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cUnit->numFPSpills, 0);
+ if (!skipOverflowCheck) {
+ // cmp rSP, fs:[stack_end_]; jcc throw_launchpad
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+ opRegThreadMem(cUnit, kOpCmp, rSP, Thread::StackEndOffset().Int32Value());
+ opCondBranch(cUnit, kCondUlt, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ }
- flushIns(cUnit);
+ flushIns(cUnit);
- if (cUnit->genDebugger) {
- // Refresh update debugger callout
- UNIMPLEMENTED(WARNING) << "genDebugger";
+ if (cUnit->genDebugger) {
+ // Refresh update debugger callout
+ UNIMPLEMENTED(WARNING) << "genDebugger";
#if 0
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
+ loadWordDisp(cUnit, rSELF,
+ OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
#endif
- }
+ }
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
}
void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb) {
@@ -221,7 +221,7 @@
newLIR0(cUnit, kPseudoMethodExit);
/* If we're compiling for the debugger, generate an update callout */
if (cUnit->genDebugger) {
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
}
unSpillCoreRegs(cUnit);
/* Remove frame except for return address */
@@ -238,32 +238,32 @@
LIR* thisLIR;
for (thisLIR = (LIR*) cUnit->firstLIRInsn;
- thisLIR != (LIR*) cUnit->lastLIRInsn;
- thisLIR = NEXT_LIR(thisLIR)) {
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
- /* Branch to the next instruction */
- if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
- LIR* nextLIR = thisLIR;
+ /* Branch to the next instruction */
+ if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
+ LIR* nextLIR = thisLIR;
- while (true) {
- nextLIR = NEXT_LIR(nextLIR);
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
- /*
- * Is the branch target the next instruction?
- */
- if (nextLIR == (LIR*) thisLIR->target) {
- thisLIR->flags.isNop = true;
- break;
- }
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
+ }
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
- * might be the last real instruction.
- */
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (LIR*) cUnit->lastLIRInsn))
- break;
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR == (LIR*) cUnit->lastLIRInsn))
+ break;
}
}
}
@@ -277,7 +277,8 @@
for (i = 0; i < kX86Last; i++) {
if (EncodingMap[i].opcode != i) {
LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing " << (int)EncodingMap[i].opcode;
+ << " is wrong: expecting " << i << ", seeing "
+ << (int)EncodingMap[i].opcode;
}
}
diff --git a/src/compiler/codegen/x86/ArchUtility.cc b/src/compiler/codegen/x86/ArchUtility.cc
index d325f5c..4e75ef2 100644
--- a/src/compiler/codegen/x86/ArchUtility.cc
+++ b/src/compiler/codegen/x86/ArchUtility.cc
@@ -24,27 +24,27 @@
/* For dumping instructions */
static const char* x86RegName[] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};
static const char* x86CondName[] = {
- "O",
- "NO",
- "B/NAE/C",
- "NB/AE/NC",
- "Z/EQ",
- "NZ/NE",
- "BE/NA",
- "NBE/A",
- "S",
- "NS",
- "P/PE",
- "NP/PO",
- "L/NGE",
- "NL/GE",
- "LE/NG",
- "NLE/G"
+ "O",
+ "NO",
+ "B/NAE/C",
+ "NB/AE/NC",
+ "Z/EQ",
+ "NZ/NE",
+ "BE/NA",
+ "NBE/A",
+ "S",
+ "NS",
+ "P/PE",
+ "NP/PO",
+ "L/NGE",
+ "NL/GE",
+ "LE/NG",
+ "NLE/G"
};
/*
@@ -95,8 +95,8 @@
break;
case 't':
buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(baseAddr) + lir->offset + operand,
- lir->target);
+ reinterpret_cast<uint32_t>(baseAddr)
+ + lir->offset + operand, lir->target);
break;
default:
buf += StringPrintf("DecodeError '%c'", fmt[i]);
@@ -111,45 +111,45 @@
void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
{
- char buf[256];
- buf[0] = 0;
- LIR *x86LIR = (LIR *) lir;
+ char buf[256];
+ buf[0] = 0;
+ LIR *x86LIR = (LIR *) lir;
- if (mask == ENCODE_ALL) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
- for (i = 0; i < kRegEnd; i++) {
- if (mask & (1ULL << i)) {
- sprintf(num, "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask & ENCODE_CCODE) {
- strcat(buf, "cc ");
- }
- /* Memory bits */
- if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
- (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
- }
- if (mask & ENCODE_LITERAL) {
- strcat(buf, "lit ");
- }
-
- if (mask & ENCODE_HEAP_REF) {
- strcat(buf, "heap ");
- }
- if (mask & ENCODE_MUST_NOT_ALIAS) {
- strcat(buf, "noalias ");
- }
+ for (i = 0; i < kRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
}
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
}
+ /* Memory bits */
+ if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
+ (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
}
} // namespace art
diff --git a/src/compiler/codegen/x86/Codegen.h b/src/compiler/codegen/x86/Codegen.h
index 52ba7c1..f04acd4 100644
--- a/src/compiler/codegen/x86/Codegen.h
+++ b/src/compiler/codegen/x86/Codegen.h
@@ -74,10 +74,10 @@
*/
inline RegisterClass oatRegClassBySize(OpSize size)
{
- return (size == kUnsignedHalf ||
- size == kSignedHalf ||
- size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ return (size == kUnsignedHalf ||
+ size == kSignedHalf ||
+ size == kUnsignedByte ||
+ size == kSignedByte ) ? kCoreReg : kAnyReg;
}
/*
@@ -89,12 +89,12 @@
*/
#if __BYTE_ORDER == __LITTLE_ENDIAN
inline s4 s4FromSwitchData(const void* switchData) {
- return *(s4*) switchData;
+ return *(s4*) switchData;
}
#else
inline s4 s4FromSwitchData(const void* switchData) {
- u2* data = switchData;
- return data[0] | (((s4) data[1]) << 16);
+ u2* data = switchData;
+ return data[0] | (((s4) data[1]) << 16);
}
#endif
@@ -102,7 +102,6 @@
extern void oatSetupResourceMasks(LIR* lir);
-extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest,
- int rSrc);
+extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc);
} // namespace art
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/FP/X86FP.cc
index fe0d149..f45a099 100644
--- a/src/compiler/codegen/x86/FP/X86FP.cc
+++ b/src/compiler/codegen/x86/FP/X86FP.cc
@@ -16,8 +16,9 @@
namespace art {
-static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2) {
+static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2) {
X86OpCode op = kX86Nop;
RegLocation rlResult;
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/X86/Factory.cc
index 2bd5b42..c3fb6a6 100644
--- a/src/compiler/codegen/x86/X86/Factory.cc
+++ b/src/compiler/codegen/x86/X86/Factory.cc
@@ -26,23 +26,23 @@
//FIXME: restore "static" when usage uncovered
/*static*/ int coreRegs[] = {
- rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI
+ rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI
#ifdef TARGET_REX_SUPPORT
- r8, r9, r10, r11, r12, r13, r14, 15
+ r8, r9, r10, r11, r12, r13, r14, 15
#endif
};
/*static*/ int reservedRegs[] = {rSP};
/*static*/ int coreTemps[] = {rAX, rCX, rDX, rBX};
/*static*/ int fpRegs[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
/*static*/ int fpTemps[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
@@ -58,29 +58,29 @@
LIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- int opcode;
- /* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
- if (DOUBLEREG(rDest)) {
- opcode = kX86MovsdRR;
- } else {
- if (SINGLEREG(rDest)) {
- if (SINGLEREG(rSrc)) {
- opcode = kX86MovssRR;
- } else { // Fpr <- Gpr
- opcode = kX86MovdxrRR;
- }
- } else { // Gpr <- Fpr
- DCHECK(SINGLEREG(rSrc));
- opcode = kX86MovdrxRR;
- }
+ int opcode;
+ /* must be both DOUBLE or both not DOUBLE */
+ DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
+ if (DOUBLEREG(rDest)) {
+ opcode = kX86MovsdRR;
+ } else {
+ if (SINGLEREG(rDest)) {
+ if (SINGLEREG(rSrc)) {
+ opcode = kX86MovssRR;
+ } else { // Fpr <- Gpr
+ opcode = kX86MovdxrRR;
+ }
+ } else { // Gpr <- Fpr
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kX86MovdrxRR;
}
- DCHECK((EncodingMap[opcode].flags & IS_BINARY_OP) != 0);
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ }
+ DCHECK((EncodingMap[opcode].flags & IS_BINARY_OP) != 0);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
/*
@@ -92,7 +92,8 @@
* 1) rDest is freshly returned from oatAllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value) {
+LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value)
+{
int rDestSave = rDest;
if (FPREG(rDest)) {
if (value == 0) {
@@ -117,7 +118,8 @@
return res;
}
-LIR* opBranchUnconditional(CompilationUnit *cUnit, OpKind op) {
+LIR* opBranchUnconditional(CompilationUnit *cUnit, OpKind op)
+{
CHECK_EQ(op, kOpUncondBr);
return newLIR1(cUnit, kX86Jmp8, 0 /* offset to be patched */ );
}
@@ -133,7 +135,8 @@
return branch;
}
-LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc) {
+LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpNeg: opcode = kX86Neg32R; break;
@@ -144,7 +147,8 @@
return newLIR1(cUnit, opcode, rDestSrc);
}
-LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int value) {
+LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int value)
+{
X86OpCode opcode = kX86Bkpt;
bool byteImm = IS_SIMM8(value);
DCHECK(!FPREG(rDestSrc1));
@@ -207,7 +211,9 @@
return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
}
-LIR* opRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase, int offset) {
+LIR* opRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+ int offset)
+{
X86OpCode opcode = kX86Nop;
switch (op) {
// X86 binary opcodes
@@ -229,7 +235,9 @@
return newLIR3(cUnit, opcode, rDest, rBase, offset);
}
-LIR* opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc1, int rSrc2) {
+LIR* opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc1,
+ int rSrc2)
+{
if (rDest != rSrc1 && rDest != rSrc2) {
if (op == kOpAdd) { // lea special case, except can't encode rbp as base
if (rSrc1 == rSrc2) {
@@ -276,7 +284,9 @@
}
}
-LIR* opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc, int value) {
+LIR* opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc,
+ int value)
+{
if (op == kOpMul) {
X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
return newLIR3(cUnit, opcode, rDest, rSrc, value);
@@ -301,7 +311,8 @@
return opRegImm(cUnit, op, rDest, value);
}
-LIR* opThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset) {
+LIR* opThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallT; break;
@@ -312,7 +323,8 @@
return newLIR1(cUnit, opcode, threadOffset);
}
-LIR* opMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp) {
+LIR* opMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallM; break;
@@ -324,7 +336,7 @@
}
LIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
- int rDestHi, int valLo, int valHi)
+ int rDestHi, int valLo, int valHi)
{
LIR *res;
if (FPREG(rDestLo)) {
@@ -354,111 +366,111 @@
LIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
int rIndex, int rDest, int scale, OpSize size)
{
- UNIMPLEMENTED(WARNING) << "loadBaseIndexed";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "loadBaseIndexed";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- LIR *first = NULL;
- LIR *res;
- X86OpCode opcode = kX86Nop;
- int tReg = oatAllocTemp(cUnit);
+ LIR *first = NULL;
+ LIR *res;
+ X86OpCode opcode = kX86Nop;
+ int tReg = oatAllocTemp(cUnit);
- if (FPREG(rDest)) {
- DCHECK(SINGLEREG(rDest));
- DCHECK((size == kWord) || (size == kSingle));
- size = kSingle;
- } else {
- if (size == kSingle)
- size = kWord;
- }
+ if (FPREG(rDest)) {
+ DCHECK(SINGLEREG(rDest));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
- if (!scale) {
- first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
- } else {
- first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
- }
+ if (!scale) {
+ first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
+ }
- switch (size) {
- case kSingle:
- opcode = kX86Flwc1;
- break;
- case kWord:
- opcode = kX86Lw;
- break;
- case kUnsignedHalf:
- opcode = kX86Lhu;
- break;
- case kSignedHalf:
- opcode = kX86Lh;
- break;
- case kUnsignedByte:
- opcode = kX86Lbu;
- break;
- case kSignedByte:
- opcode = kX86Lb;
- break;
- default:
- LOG(FATAL) << "Bad case in loadBaseIndexed";
- }
+ switch (size) {
+ case kSingle:
+ opcode = kX86Flwc1;
+ break;
+ case kWord:
+ opcode = kX86Lw;
+ break;
+ case kUnsignedHalf:
+ opcode = kX86Lhu;
+ break;
+ case kSignedHalf:
+ opcode = kX86Lh;
+ break;
+ case kUnsignedByte:
+ opcode = kX86Lbu;
+ break;
+ case kSignedByte:
+ opcode = kX86Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexed";
+ }
- res = newLIR3(cUnit, opcode, rDest, 0, tReg);
- oatFreeTemp(cUnit, tReg);
- return (first) ? first : res;
+ res = newLIR3(cUnit, opcode, rDest, 0, tReg);
+ oatFreeTemp(cUnit, tReg);
+ return (first) ? first : res;
#endif
}
LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- UNIMPLEMENTED(WARNING) << "loadMultiple";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "loadMultiple";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- int i;
- int loadCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int loadCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) {
- newLIR3(cUnit, kX86Lw, i+r_A0, loadCnt*4, rBase);
- loadCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) {
+ newLIR3(cUnit, kX86Lw, i+r_A0, loadCnt*4, rBase);
+ loadCnt++;
}
+ }
- if (loadCnt) {/* increment after */
- newLIR3(cUnit, kX86Addiu, rBase, rBase, loadCnt*4);
- }
+ if (loadCnt) {/* increment after */
+ newLIR3(cUnit, kX86Addiu, rBase, rBase, loadCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
#endif
}
LIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- UNIMPLEMENTED(WARNING) << "storeMultiple";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "storeMultiple";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- int i;
- int storeCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int storeCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) {
- newLIR3(cUnit, kX86Sw, i+r_A0, storeCnt*4, rBase);
- storeCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) {
+ newLIR3(cUnit, kX86Sw, i+r_A0, storeCnt*4, rBase);
+ storeCnt++;
}
+ }
- if (storeCnt) { /* increment after */
- newLIR3(cUnit, kX86Addiu, rBase, rBase, storeCnt*4);
- }
+ if (storeCnt) { /* increment after */
+ newLIR3(cUnit, kX86Addiu, rBase, rBase, storeCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
#endif
}
@@ -524,11 +536,12 @@
load = newLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
} else {
load = newLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
- load2 = newLIR3(cUnit, opcode, rDestHi, rBase, displacement + HIWORD_OFFSET);
+ load2 = newLIR3(cUnit, opcode, rDestHi, rBase,
+ displacement + HIWORD_OFFSET);
}
if (rBase == rSP) {
- annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- true /* isLoad */, is64bit);
+ annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0))
+ >> 2, true /* isLoad */, is64bit);
if (pair) {
annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
true /* isLoad */, is64bit);
@@ -536,10 +549,13 @@
}
} else {
if (!pair) {
- load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale, displacement + LOWORD_OFFSET);
+ load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET);
} else {
- load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale, displacement + LOWORD_OFFSET);
- load2 = newLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale, displacement + HIWORD_OFFSET);
+ load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET);
+ load2 = newLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale,
+ displacement + HIWORD_OFFSET);
}
}
@@ -619,14 +635,18 @@
store = newLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
} else {
store = newLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
- store2 = newLIR3(cUnit, opcode, rBase, displacement + HIWORD_OFFSET, rSrcHi);
+ store2 = newLIR3(cUnit, opcode, rBase, displacement + HIWORD_OFFSET,
+ rSrcHi);
}
} else {
if (!pair) {
- store = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + LOWORD_OFFSET, rSrc);
+ store = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET, rSrc);
} else {
- store = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + LOWORD_OFFSET, rSrc);
- store2 = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + HIWORD_OFFSET, rSrcHi);
+ store = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET, rSrc);
+ store2 = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + HIWORD_OFFSET, rSrcHi);
}
}
@@ -634,34 +654,38 @@
}
/* store value base base + scaled index. */
-LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc, int scale,
- OpSize size)
+LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc,
+ int scale, OpSize size)
{
return storeBaseIndexedDisp(cUnit, NULL, rBase, rIndex, scale, 0,
rSrc, INVALID_REG, size, INVALID_SREG);
}
-LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase, int displacement, int rSrc, OpSize size) {
- return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0, displacement,
- rSrc, INVALID_REG, size, INVALID_SREG);
+LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase, int displacement,
+ int rSrc, OpSize size)
+{
+ return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0,
+ displacement, rSrc, INVALID_REG, size,
+ INVALID_SREG);
}
LIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase, int displacement,
- int rSrcLo, int rSrcHi) {
+ int rSrcLo, int rSrcHi)
+{
return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0, displacement,
rSrcLo, rSrcHi, kLong, INVALID_SREG);
}
void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- storeWordDisp(cUnit, base, 0, lowReg);
- storeWordDisp(cUnit, base, 4, highReg);
+ storeWordDisp(cUnit, base, 0, lowReg);
+ storeWordDisp(cUnit, base, 4, highReg);
}
void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- loadWordDisp(cUnit, base, 0, lowReg);
- loadWordDisp(cUnit, base, 4, highReg);
+ loadWordDisp(cUnit, base, 0, lowReg);
+ loadWordDisp(cUnit, base, 4, highReg);
}
} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
index ffc5952..62ff3ad 100644
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -27,7 +27,7 @@
void genSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
SpecialCaseHandler specialCase)
{
- // TODO
+ // TODO
}
/*
@@ -36,13 +36,13 @@
LIR* genRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
int reg1, int base, int offset, MIR* mir, ThrowKind kind)
{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- mir ? mir->offset : 0, reg1, base, offset);
- opRegMem(cUnit, kOpCmp, reg1, base, offset);
- LIR* branch = opCondBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
+ mir ? mir->offset : 0, reg1, base, offset);
+ opRegMem(cUnit, kOpCmp, reg1, base, offset);
+ LIR* branch = opCondBranch(cUnit, cCode, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
}
/*
@@ -51,7 +51,9 @@
*/
BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
bool split, bool create, BasicBlock** immedPredBlockP);
-void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc, LIR* labelList) {
+void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
+ LIR* labelList)
+{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
if (cUnit->printMe) {
dumpSparseSwitchTable(table);
@@ -64,7 +66,8 @@
int key = keys[i];
BasicBlock* case_block = findBlock(cUnit, mir->offset + targets[i],
false, false, NULL);
- opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key, &labelList[case_block->id]);
+ opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
+ &labelList[case_block->id]);
}
}
@@ -84,7 +87,8 @@
* jmp rStartOfMethod
* done:
*/
-void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc) {
+void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
if (cUnit->printMe) {
dumpPackedSwitchTable(table);
@@ -120,7 +124,8 @@
// Load the displacement from the switch table
int dispReg = oatAllocTemp(cUnit);
- newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2, (intptr_t)tabRec);
+ newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2,
+ (intptr_t)tabRec);
// Add displacement to start of method
opRegReg(cUnit, kOpAdd, startOfMethodReg, dispReg);
// ..and go!
@@ -132,7 +137,8 @@
branchOver->target = (LIR*)target;
}
-void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1);
+void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
+ int arg0, int arg1);
/*
* Array data table format:
* ushort ident = 0x0300 magic value
@@ -147,7 +153,8 @@
{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
+ FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData),
+ true, kAllocData);
tabRec->table = table;
tabRec->vaddr = mir->offset;
u2 width = tabRec->table[1];
@@ -163,35 +170,37 @@
newLIR1(cUnit, kX86StartOfMethod, rARG2);
newLIR2(cUnit, kX86PcRelAdr, rARG1, (intptr_t)tabRec);
newLIR2(cUnit, kX86Add32RR, rARG1, rARG2);
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rARG0, rARG1);
+ callRuntimeHelperRegReg(cUnit,
+ ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ rARG0, rARG1);
}
void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegFloat " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
+ UNIMPLEMENTED(WARNING) << "genNegFloat "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ newLIR0(cUnit, kX86Bkpt);
#if 0
- RegLocation rlResult;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.lowReg,
- rlSrc.lowReg, 0x80000000);
- storeValue(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
+ storeValue(cUnit, rlDest, rlResult);
#endif
}
void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegDouble" << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
+ UNIMPLEMENTED(WARNING) << "genNegDouble"
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ newLIR0(cUnit, kX86Bkpt);
#if 0
- RegLocation rlResult;
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg,
- 0x80000000);
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- storeValueWide(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ storeValueWide(cUnit, rlDest, rlResult);
#endif
}
@@ -203,12 +212,12 @@
*/
void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - artLockObjectFromCode(self, obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
}
/*
@@ -216,12 +225,12 @@
*/
void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - UnlockObjectFromCode(obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
}
/*
@@ -243,20 +252,20 @@
void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc1, r2, r3);
- // Compute (r1:r0) = (r1:r0) - (r2:r3)
- opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
- newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r2:r3) ? 1 : 0
- newLIR2(cUnit, kX86Movzx8RR, r0, r0);
- opRegImm(cUnit, kOpAsr, r1, 31); // r1 = high >> 31
- opRegReg(cUnit, kOpOr, r0, r1); // r0 holds result
- RegLocation rlResult = LOC_C_RETURN;
- storeValue(cUnit, rlDest, rlResult);
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+  loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) - (r2:r3)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
+ newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r2:r3) ? 1 : 0
+ newLIR2(cUnit, kX86Movzx8RR, r0, r0);
+ opRegImm(cUnit, kOpAsr, r1, 31); // r1 = high >> 31
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 holds result
+ RegLocation rlResult = LOC_C_RETURN;
+ storeValue(cUnit, rlDest, rlResult);
}
X86ConditionCode oatX86ConditionEncoding(ConditionCode cond) {
@@ -281,11 +290,13 @@
return kX86CondO;
}
-LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1, int src2, LIR* target)
+LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+ int src2, LIR* target)
{
newLIR2(cUnit, kX86Cmp32RR, src1, src2);
X86ConditionCode cc = oatX86ConditionEncoding(cond);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+ cc);
branch->target = target;
return branch;
}
@@ -307,25 +318,26 @@
LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
{
- if (FPREG(rDest) || FPREG(rSrc))
- return fpRegCopy(cUnit, rDest, rSrc);
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
- rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ if (FPREG(rDest) || FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
+ rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
- oatAppendLIR(cUnit, res);
- return res;
+ LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, res);
+ return res;
}
void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi) {
+ int srcLo, int srcHi)
+{
bool destFP = FPREG(destLo) && FPREG(destHi);
bool srcFP = FPREG(srcLo) && FPREG(srcHi);
assert(FPREG(srcLo) == FPREG(srcHi));
diff --git a/src/compiler/codegen/x86/X86/Ralloc.cc b/src/compiler/codegen/x86/X86/Ralloc.cc
index 38b9df8..2886b8f 100644
--- a/src/compiler/codegen/x86/X86/Ralloc.cc
+++ b/src/compiler/codegen/x86/X86/Ralloc.cc
@@ -29,7 +29,8 @@
* high reg in next byte.
*/
int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass) {
+ int regClass)
+{
int highReg;
int lowReg;
int res = 0;
@@ -65,12 +66,12 @@
cUnit->regPool = pool;
pool->numCoreRegs = numRegs;
pool->coreRegs = (RegisterInfo *)
- oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
- true, kAllocRegAlloc);
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true,
+ kAllocRegAlloc);
pool->numFPRegs = numFPRegs;
pool->FPRegs = (RegisterInfo *)
- oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
- kAllocRegAlloc);
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
// Keep special registers from being allocated
@@ -104,7 +105,8 @@
}
void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree) {
+ RegLocation rlFree)
+{
if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
(rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
// No overlap, free both
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/X86LIR.h
index 85d2565..3646a1f 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/X86LIR.h
@@ -144,18 +144,18 @@
#define LOC_C_RETURN_WIDE_DOUBLE {kLocPhysReg, 1, 0, 1, 0, 0, 1, fr0, fr1, INVALID_SREG}
enum ResourceEncodingPos {
- kGPReg0 = 0,
- kRegSP = 4,
- kRegLR = -1,
- kFPReg0 = 16, // xmm0 .. xmm7/xmm15
- kFPRegEnd = 32,
- kRegEnd = kFPRegEnd,
- kCCode = kRegEnd,
- // The following four bits are for memory disambiguation
- kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
- kLiteral, // 2 Literal pool (can be fully disambiguated)
- kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
- kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
+ kGPReg0 = 0,
+ kRegSP = 4,
+ kRegLR = -1,
+ kFPReg0 = 16, // xmm0 .. xmm7/xmm15
+ kFPRegEnd = 32,
+ kRegEnd = kFPRegEnd,
+ kCCode = kRegEnd,
+ // The following four bits are for memory disambiguation
+ kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
+ kLiteral, // 2 Literal pool (can be fully disambiguated)
+ kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
+ kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
};
#define ENCODE_REG_LIST(N) ((u8) N)
@@ -242,49 +242,49 @@
/* X86 condition encodings */
enum X86ConditionCode {
- kX86CondO = 0x0, // overflow
- kX86CondNo = 0x1, // not overflow
+ kX86CondO = 0x0, // overflow
+ kX86CondNo = 0x1, // not overflow
- kX86CondB = 0x2, // below
- kX86CondNae = kX86CondB, // not-above-equal
- kX86CondC = kX86CondB, // carry
+ kX86CondB = 0x2, // below
+ kX86CondNae = kX86CondB, // not-above-equal
+ kX86CondC = kX86CondB, // carry
- kX86CondNb = 0x3, // not-below
- kX86CondAe = kX86CondNb, // above-equal
- kX86CondNc = kX86CondNb, // not-carry
+ kX86CondNb = 0x3, // not-below
+ kX86CondAe = kX86CondNb, // above-equal
+ kX86CondNc = kX86CondNb, // not-carry
- kX86CondZ = 0x4, // zero
- kX86CondEq = kX86CondZ, // equal
+ kX86CondZ = 0x4, // zero
+ kX86CondEq = kX86CondZ, // equal
- kX86CondNz = 0x5, // not-zero
- kX86CondNe = kX86CondNz, // not-equal
+ kX86CondNz = 0x5, // not-zero
+ kX86CondNe = kX86CondNz, // not-equal
- kX86CondBe = 0x6, // below-equal
- kX86CondNa = kX86CondBe, // not-above
+ kX86CondBe = 0x6, // below-equal
+ kX86CondNa = kX86CondBe, // not-above
- kX86CondNbe = 0x7, // not-below-equal
- kX86CondA = kX86CondNbe,// above
+ kX86CondNbe = 0x7, // not-below-equal
+ kX86CondA = kX86CondNbe,// above
- kX86CondS = 0x8, // sign
- kX86CondNs = 0x9, // not-sign
+ kX86CondS = 0x8, // sign
+ kX86CondNs = 0x9, // not-sign
- kX86CondP = 0xA, // 8-bit parity even
- kX86CondPE = kX86CondP,
+ kX86CondP = 0xA, // 8-bit parity even
+ kX86CondPE = kX86CondP,
- kX86CondNp = 0xB, // 8-bit parity odd
- kX86CondPo = kX86CondNp,
+ kX86CondNp = 0xB, // 8-bit parity odd
+ kX86CondPo = kX86CondNp,
- kX86CondL = 0xC, // less-than
- kX86CondNge = kX86CondL, // not-greater-equal
+ kX86CondL = 0xC, // less-than
+ kX86CondNge = kX86CondL, // not-greater-equal
- kX86CondNl = 0xD, // not-less-than
- kX86CondGe = kX86CondNl, // not-greater-equal
+ kX86CondNl = 0xD, // not-less-than
+  kX86CondGe  = kX86CondNl, // greater-equal
- kX86CondLe = 0xE, // less-than-equal
- kX86CondNg = kX86CondLe, // not-greater
+ kX86CondLe = 0xE, // less-than-equal
+ kX86CondNg = kX86CondLe, // not-greater
- kX86CondNle = 0xF, // not-less-than
- kX86CondG = kX86CondNle,// greater
+  kX86CondNle = 0xF,        // not-less-equal
+ kX86CondG = kX86CondNle,// greater
};
/*
@@ -293,49 +293,49 @@
* Assemble.cc.
*/
enum X86OpCode {
- kPseudoIntrinsicRetry = -16,
- kPseudoSuspendTarget = -15,
- kPseudoThrowTarget = -14,
- kPseudoCaseLabel = -13,
- kPseudoMethodEntry = -12,
- kPseudoMethodExit = -11,
- kPseudoBarrier = -10,
- kPseudoExtended = -9,
- kPseudoSSARep = -8,
- kPseudoEntryBlock = -7,
- kPseudoExitBlock = -6,
- kPseudoTargetLabel = -5,
- kPseudoDalvikByteCodeBoundary = -4,
- kPseudoPseudoAlign4 = -3,
- kPseudoEHBlockLabel = -2,
- kPseudoNormalBlockLabel = -1,
- kX86First,
- kX8632BitData = kX86First, /* data [31..0] */
- kX86Bkpt,
- kX86Nop,
- // Define groups of binary operations
- // MR - Memory Register - opcode [base + disp], reg
- // - lir operands - 0: base, 1: disp, 2: reg
- // AR - Array Register - opcode [base + index * scale + disp], reg
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
- // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
- // - lir operands - 0: disp, 1: reg
- // RR - Register Register - opcode reg1, reg2
- // - lir operands - 0: reg1, 1: reg2
- // RM - Register Memory - opcode reg, [base + disp]
- // - lir operands - 0: reg, 1: base, 2: disp
- // RA - Register Array - opcode reg, [base + index * scale + disp]
- // - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
- // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
- // - lir operands - 0: reg, 1: disp
- // RI - Register Immediate - opcode reg, #immediate
- // - lir operands - 0: reg, 1: immediate
- // MI - Memory Immediate - opcode [base + disp], #immediate
- // - lir operands - 0: base, 1: disp, 2: immediate
- // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
- // TI - Thread Register - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
- // - lir operands - 0: disp, 1: imm
+ kPseudoIntrinsicRetry = -16,
+ kPseudoSuspendTarget = -15,
+ kPseudoThrowTarget = -14,
+ kPseudoCaseLabel = -13,
+ kPseudoMethodEntry = -12,
+ kPseudoMethodExit = -11,
+ kPseudoBarrier = -10,
+ kPseudoExtended = -9,
+ kPseudoSSARep = -8,
+ kPseudoEntryBlock = -7,
+ kPseudoExitBlock = -6,
+ kPseudoTargetLabel = -5,
+ kPseudoDalvikByteCodeBoundary = -4,
+ kPseudoPseudoAlign4 = -3,
+ kPseudoEHBlockLabel = -2,
+ kPseudoNormalBlockLabel = -1,
+ kX86First,
+ kX8632BitData = kX86First, /* data [31..0] */
+ kX86Bkpt,
+ kX86Nop,
+ // Define groups of binary operations
+ // MR - Memory Register - opcode [base + disp], reg
+ // - lir operands - 0: base, 1: disp, 2: reg
+ // AR - Array Register - opcode [base + index * scale + disp], reg
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: reg
+ // RR - Register Register - opcode reg1, reg2
+ // - lir operands - 0: reg1, 1: reg2
+ // RM - Register Memory - opcode reg, [base + disp]
+ // - lir operands - 0: reg, 1: base, 2: disp
+ // RA - Register Array - opcode reg, [base + index * scale + disp]
+ // - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+ // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
+ // - lir operands - 0: reg, 1: disp
+ // RI - Register Immediate - opcode reg, #immediate
+ // - lir operands - 0: reg, 1: immediate
+ // MI - Memory Immediate - opcode [base + disp], #immediate
+ // - lir operands - 0: base, 1: disp, 2: immediate
+ // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+ // TI - Thread Register - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
@@ -348,118 +348,118 @@
opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
- BinaryOpCode(kX86Add),
- BinaryOpCode(kX86Or),
- BinaryOpCode(kX86Adc),
- BinaryOpCode(kX86Sbb),
- BinaryOpCode(kX86And),
- BinaryOpCode(kX86Sub),
- BinaryOpCode(kX86Xor),
- BinaryOpCode(kX86Cmp),
+ BinaryOpCode(kX86Add),
+ BinaryOpCode(kX86Or),
+ BinaryOpCode(kX86Adc),
+ BinaryOpCode(kX86Sbb),
+ BinaryOpCode(kX86And),
+ BinaryOpCode(kX86Sub),
+ BinaryOpCode(kX86Xor),
+ BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
- kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
- kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
- kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
- kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
- kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
- kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
- kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
- kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
- kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
- kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
- kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
- kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
- kX86Lea32RA,
- // RC - Register CL - opcode reg, CL
- // - lir operands - 0: reg, 1: CL
- // MC - Memory CL - opcode [base + disp], CL
- // - lir operands - 0: base, 1: disp, 2: CL
- // AC - Array CL - opcode [base + index * scale + disp], CL
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
+ kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
+ kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
+ kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+ kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
+ kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
+ kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
+ kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
+ kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
+ kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
+ kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+ kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+ kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
+ kX86Lea32RA,
+ // RC - Register CL - opcode reg, CL
+ // - lir operands - 0: reg, 1: CL
+ // MC - Memory CL - opcode [base + disp], CL
+ // - lir operands - 0: base, 1: disp, 2: CL
+ // AC - Array CL - opcode [base + index * scale + disp], CL
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
- opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
- opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
- opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
- opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
- opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
- opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
- BinaryShiftOpCode(kX86Rol),
- BinaryShiftOpCode(kX86Ror),
- BinaryShiftOpCode(kX86Rcl),
- BinaryShiftOpCode(kX86Rcr),
- BinaryShiftOpCode(kX86Sal),
- BinaryShiftOpCode(kX86Shr),
- BinaryShiftOpCode(kX86Sar),
+ opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
+ opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
+ opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
+ opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
+ opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
+ opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+ BinaryShiftOpCode(kX86Rol),
+ BinaryShiftOpCode(kX86Ror),
+ BinaryShiftOpCode(kX86Rcl),
+ BinaryShiftOpCode(kX86Rcr),
+ BinaryShiftOpCode(kX86Sal),
+ BinaryShiftOpCode(kX86Shr),
+ BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpcode
#define UnaryOpcode(opcode, reg, mem, array) \
- opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
- opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
- opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
- UnaryOpcode(kX86Test, RI, MI, AI),
- UnaryOpcode(kX86Not, R, M, A),
- UnaryOpcode(kX86Neg, R, M, A),
- UnaryOpcode(kX86Mul, DaR, DaM, DaA),
- UnaryOpcode(kX86Imul, DaR, DaM, DaA),
- UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
- UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+ opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
+ opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
+ opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+ UnaryOpcode(kX86Test, RI, MI, AI),
+ UnaryOpcode(kX86Not, R, M, A),
+ UnaryOpcode(kX86Neg, R, M, A),
+ UnaryOpcode(kX86Mul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Imul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
+ UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
opcode ## RR, opcode ## RM, opcode ## RA
- Binary0fOpCode(kX86Movsd),
- kX86MovsdMR,
- kX86MovsdAR,
- Binary0fOpCode(kX86Movss),
- kX86MovssMR,
- kX86MovssAR,
- Binary0fOpCode(kX86Cvtsi2sd), // int to double
- Binary0fOpCode(kX86Cvtsi2ss), // int to float
- Binary0fOpCode(kX86Cvttsd2si), // truncating double to int
- Binary0fOpCode(kX86Cvttss2si), // truncating float to int
- Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
- Binary0fOpCode(kX86Cvtss2si), // rounding float to int
- Binary0fOpCode(kX86Ucomisd), // unordered double compare
- Binary0fOpCode(kX86Ucomiss), // unordered float compare
- Binary0fOpCode(kX86Comisd), // double compare
- Binary0fOpCode(kX86Comiss), // float compare
- Binary0fOpCode(kX86Orps), // or of floating point registers
- Binary0fOpCode(kX86Xorps), // xor of floating point registers
- Binary0fOpCode(kX86Addsd), // double add
- Binary0fOpCode(kX86Addss), // float add
- Binary0fOpCode(kX86Mulsd), // double multiply
- Binary0fOpCode(kX86Mulss), // float multiply
- Binary0fOpCode(kX86Cvtss2sd), // float to double
- Binary0fOpCode(kX86Cvtsd2ss), // double to float
- Binary0fOpCode(kX86Subsd), // double subtract
- Binary0fOpCode(kX86Subss), // float subtract
- Binary0fOpCode(kX86Divsd), // double divide
- Binary0fOpCode(kX86Divss), // float divide
- kX86PsllqRI, // shift of floating point registers
- Binary0fOpCode(kX86Movdxr), // move into xmm from gpr
- Binary0fOpCode(kX86Movdrx), // move into reg from xmm
- kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
- kX86Mfence, // memory barrier
- Binary0fOpCode(kX86Imul16), // 16bit multiply
- Binary0fOpCode(kX86Imul32), // 32bit multiply
- Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
- Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
- Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
- Binary0fOpCode(kX86Movsx16), // sign-extend 16-bit value
+ Binary0fOpCode(kX86Movsd),
+ kX86MovsdMR,
+ kX86MovsdAR,
+ Binary0fOpCode(kX86Movss),
+ kX86MovssMR,
+ kX86MovssAR,
+ Binary0fOpCode(kX86Cvtsi2sd), // int to double
+ Binary0fOpCode(kX86Cvtsi2ss), // int to float
+ Binary0fOpCode(kX86Cvttsd2si),// truncating double to int
+ Binary0fOpCode(kX86Cvttss2si),// truncating float to int
+ Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
+ Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+ Binary0fOpCode(kX86Ucomisd), // unordered double compare
+ Binary0fOpCode(kX86Ucomiss), // unordered float compare
+ Binary0fOpCode(kX86Comisd), // double compare
+ Binary0fOpCode(kX86Comiss), // float compare
+ Binary0fOpCode(kX86Orps), // or of floating point registers
+ Binary0fOpCode(kX86Xorps), // xor of floating point registers
+ Binary0fOpCode(kX86Addsd), // double add
+ Binary0fOpCode(kX86Addss), // float add
+ Binary0fOpCode(kX86Mulsd), // double multiply
+ Binary0fOpCode(kX86Mulss), // float multiply
+ Binary0fOpCode(kX86Cvtss2sd), // float to double
+ Binary0fOpCode(kX86Cvtsd2ss), // double to float
+ Binary0fOpCode(kX86Subsd), // double subtract
+ Binary0fOpCode(kX86Subss), // float subtract
+ Binary0fOpCode(kX86Divsd), // double divide
+ Binary0fOpCode(kX86Divss), // float divide
+ kX86PsllqRI, // shift of floating point registers
+ Binary0fOpCode(kX86Movdxr), // move into xmm from gpr
+ Binary0fOpCode(kX86Movdrx), // move into reg from xmm
+ kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
+ kX86Mfence, // memory barrier
+ Binary0fOpCode(kX86Imul16), // 16bit multiply
+ Binary0fOpCode(kX86Imul32), // 32bit multiply
+ Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
+ Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
+ Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
+ Binary0fOpCode(kX86Movsx16), // sign-extend 16-bit value
#undef Binary0fOpCode
- kX86Jcc8, kX86Jcc32, // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
- kX86Jmp8, kX86Jmp32, // jmp rel8/32; lir operands - 0: rel, target assigned
- kX86JmpR, // jmp reg; lir operands - 0: reg
- kX86CallR, // call reg; lir operands - 0: reg
- kX86CallM, // call [base + disp]; lir operands - 0: base, 1: disp
- kX86CallA, // call [base + index * scale + disp]
- // lir operands - 0: base, 1: index, 2: scale, 3: disp
- kX86CallT, // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
- kX86Ret, // ret; no lir operands
- kX86StartOfMethod, // call 0; pop reg; sub reg, # - generate start of method into reg
- // lir operands - 0: reg
- kX86PcRelLoadRA, // mov reg, [base + index * scale + PC relative displacement]
- // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
- kX86PcRelAdr, // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
- kX86Last
+ kX86Jcc8, kX86Jcc32, // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
+ kX86Jmp8, kX86Jmp32, // jmp rel8/32; lir operands - 0: rel, target assigned
+ kX86JmpR, // jmp reg; lir operands - 0: reg
+ kX86CallR, // call reg; lir operands - 0: reg
+ kX86CallM, // call [base + disp]; lir operands - 0: base, 1: disp
+ kX86CallA, // call [base + index * scale + disp]
+ // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ kX86CallT, // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
+ kX86Ret, // ret; no lir operands
+ kX86StartOfMethod, // call 0; pop reg; sub reg, # - generate start of method into reg
+ // lir operands - 0: reg
+ kX86PcRelLoadRA, // mov reg, [base + index * scale + PC relative displacement]
+ // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+ kX86PcRelAdr, // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
+ kX86Last
};
/* Instruction assembly fieldLoc kind */
@@ -477,10 +477,10 @@
kShiftRegCl, kShiftMemCl, kShiftArrayCl, // Shift opcode with register CL.
kRegRegReg, kRegRegMem, kRegRegArray, // RRR, RRM, RRA instruction kinds.
kRegCond, kMemCond, kArrayCond, // R, M, A instruction kinds following by a condition.
- kJmp, kJcc, kCall, // Branch instruction kinds.
- kPcRel, // Operation with displacement that is PC relative
- kMacro, // An instruction composing multiple others
- kUnimplemented // Encoding used when an instruction isn't yet implemented.
+ kJmp, kJcc, kCall, // Branch instruction kinds.
+ kPcRel, // Operation with displacement that is PC relative
+ kMacro, // An instruction composing multiple others
+ kUnimplemented // Encoding used when an instruction isn't yet implemented.
};
/* Struct used to define the EncodingMap positions for each X86 opcode */
@@ -489,16 +489,16 @@
X86EncodingKind kind; // Used to discriminate in the union below
int flags;
struct {
- uint8_t prefix1; // non-zero => a prefix byte
- uint8_t prefix2; // non-zero => a second prefix byte
- uint8_t opcode; // 1 byte opcode
- uint8_t extra_opcode1; // possible extra opcode byte
- uint8_t extra_opcode2; // possible second extra opcode byte
- // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
- // encoding kind
- uint8_t modrm_opcode;
- uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
- uint8_t immediate_bytes; // number of bytes of immediate
+ uint8_t prefix1; // non-zero => a prefix byte
+ uint8_t prefix2; // non-zero => a second prefix byte
+ uint8_t opcode; // 1 byte opcode
+ uint8_t extra_opcode1; // possible extra opcode byte
+ uint8_t extra_opcode2; // possible second extra opcode byte
+ // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
+ // encoding kind
+ uint8_t modrm_opcode;
+ uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
+ uint8_t immediate_bytes; // number of bytes of immediate
} skeleton;
const char *name;
const char* fmt;
@@ -512,32 +512,32 @@
/* Bit flags describing the behavior of each native opcode */
enum X86OpFeatureFlags {
- kIsBranch = 0,
- kRegDef0,
- kRegDef1,
- kRegDefSP,
- kRegDefList0,
- kRegDefList1,
- kRegUse0,
- kRegUse1,
- kRegUse2,
- kRegUse3,
- kRegUseSP,
- kRegUseList0,
- kRegUseList1,
- kNoOperand,
- kIsUnaryOp,
- kIsBinaryOp,
- kIsTertiaryOp,
- kIsQuadOp,
- kIsQuinOp,
- kIsSextupleOp,
- kIsIT,
- kSetsCCodes,
- kUsesCCodes,
- kMemLoad,
- kMemStore,
- kPCRelFixup,
+ kIsBranch = 0,
+ kRegDef0,
+ kRegDef1,
+ kRegDefSP,
+ kRegDefList0,
+ kRegDefList1,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUseSP,
+ kRegUseList0,
+ kRegUseList1,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsQuinOp,
+ kIsSextupleOp,
+ kIsIT,
+ kSetsCCodes,
+ kUsesCCodes,
+ kMemLoad,
+ kMemStore,
+ kPCRelFixup,
// FIXME: add NEEDS_FIXUP to instruction attributes
};
@@ -591,7 +591,7 @@
/* Keys for target-specific scheduling and other optimization hints */
enum X86TargetOptHints {
- kMaxHoistDistance,
+ kMaxHoistDistance,
};
/* Offsets of high and low halves of a 64bit value */
diff --git a/src/compiler/codegen/x86/X86RallocUtil.cc b/src/compiler/codegen/x86/X86RallocUtil.cc
index a85cb8a..58ad25a 100644
--- a/src/compiler/codegen/x86/X86RallocUtil.cc
+++ b/src/compiler/codegen/x86/X86RallocUtil.cc
@@ -41,64 +41,60 @@
*/
void oatMarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
{
- UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
+ UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
#if 0
- LOG(FATAL) << "No support yet for promoted FP regs";
+ LOG(FATAL) << "No support yet for promoted FP regs";
#endif
}
void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
{
- RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
- DCHECK(info1 && info2 && info1->pair && info2->pair &&
- (info1->partner == info2->reg) &&
- (info2->partner == info1->reg));
- if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
- LOG(FATAL) << "Long half-temp, half-promoted";
- }
-
- info1->dirty = false;
- info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
- info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- oatFlushRegWideImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- info1->reg, info1->partner);
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
}
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = SRegToVReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg),
+ info1->reg, info1->partner);
+ }
}
void oatFlushReg(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if (info->live && info->dirty) {
- info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- oatFlushRegImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- reg, kWord);
- }
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = SRegToVReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg), reg, kWord);
+ }
}
/* Give access to the target-dependent FP register encoding to common code */
bool oatIsFpReg(int reg) {
- return FPREG(reg);
+ return FPREG(reg);
}
uint32_t oatFpRegMask() {
- return FP_REG_MASK;
+ return FP_REG_MASK;
}
/* Clobber all regs that might be used by an external C call */
extern void oatClobberCalleeSave(CompilationUnit *cUnit)
{
- oatClobber(cUnit, rAX);
- oatClobber(cUnit, rCX);
- oatClobber(cUnit, rDX);
+ oatClobber(cUnit, rAX);
+ oatClobber(cUnit, rCX);
+ oatClobber(cUnit, rDX);
}
extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit) {
@@ -115,41 +111,41 @@
extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
{
- RegLocation res = LOC_C_RETURN;
- res.lowReg = rDX;
- oatClobber(cUnit, rDX);
- oatMarkInUse(cUnit, rDX);
- return res;
+ RegLocation res = LOC_C_RETURN;
+ res.lowReg = rDX;
+ oatClobber(cUnit, rDX);
+ oatMarkInUse(cUnit, rDX);
+ return res;
}
extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
{
- return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
}
/* To be used when explicitly managing register use */
extern void oatLockCallTemps(CompilationUnit* cUnit)
{
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
- oatLockTemp(cUnit, rARG3);
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
}
/* To be used when explicitly managing register use */
extern void oatFreeCallTemps(CompilationUnit* cUnit)
{
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
- oatFreeTemp(cUnit, rARG3);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
}
/* Convert an instruction to a NOP */
void oatNopLIR( LIR* lir)
{
- ((LIR*)lir)->flags.isNop = true;
+ ((LIR*)lir)->flags.isNop = true;
}
} // namespace art
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
index 2bb84d7..4b70202 100644
--- a/src/compiler/codegen/x86/x86/ArchVariant.cc
+++ b/src/compiler/codegen/x86/x86/ArchVariant.cc
@@ -27,33 +27,33 @@
*/
InstructionSet oatInstructionSet()
{
- return kX86;
+ return kX86;
}
/* Architecture-specific initializations and checks go here */
bool oatArchVariantInit(void)
{
- return true;
+ return true;
}
int dvmCompilerTargetOptHint(int key)
{
- int res;
- switch (key) {
- case kMaxHoistDistance:
- res = 2;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
- }
- return res;
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
}
void oatGenMemBarrier(CompilationUnit *cUnit, int /* barrierKind */)
{
#if ANDROID_SMP != 0
- // TODO: optimize fences
- newLIR0(cUnit, kX86Mfence);
+ // TODO: optimize fences
+ newLIR0(cUnit, kX86Mfence);
#endif
}