Implement method inlining for getters/setters
Changes include:
1) Force a trace that ends with an invoke instruction to also
include the next instruction if it is a move-result, because both
need to be turned into no-ops if the callee is inlined (see the
sketch after this list).
2) Interpreter entry point/trace builder changes so that return
targets are no longer automatically treated as trace starting
points (to avoid duplicate traces that include the move-result
instructions).
3) Codegen changes to handle getters/setters invoked from both
monomorphic and polymorphic callsites (the second sketch below
illustrates the predicted-receiver check).
4) Extend/fix self-verification to form identical trace regions and
handle traces with inlined callees.
5) Apply touchups to the method-based parsing (still not in use).
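The intent of change 1, as a minimal C sketch (not the code in this
patch): once the callee is inlined, the invoke and its trailing
move-result are both flagged dead via the new MIR_INLINED bit.
isMoveResult() is a hypothetical predicate for the move-result
opcodes, and MIR nodes are assumed to be chained through a "next"
field:

    static bool isMoveResult(const MIR *mir);  /* hypothetical */

    static void markInlinedInvoke(MIR *invokeMIR)
    {
        /* The invoke becomes dead once the callee body is inlined */
        invokeMIR->OptimizationFlags |= MIR_INLINED;

        /* Change 1 guarantees the move-result (if any) sits in the
         * same trace, so it can be killed with the invoke. */
        MIR *nextMIR = invokeMIR->next;
        if (nextMIR != NULL && isMoveResult(nextMIR)) {
            nextMIR->OptimizationFlags |= MIR_INLINED;
        }
    }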
Change-Id: I116b934df01bf9ada6d5a25187510e352bccd13c
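
For the polymorphic case in change 3, the generated code is meant to
behave like the C sketch below: test the receiver's class against the
class recorded in CallsiteInfo at trace-build time, and fall back to
the original invoke on a mispredict (misPredBranchOver in the struct
added below records the emitted mispredict branch so the slow path
can bind it). getterFieldValue() and slowPathInvoke() are
hypothetical stand-ins for the inlined field access and the original
invoke path:

    static int predictedInlineGetter(Object *receiver,
                                     const CallsiteInfo *info)
    {
        if (receiver->clazz == info->clazz) {
            /* Prediction holds: run the inlined field load */
            return getterFieldValue(receiver);
        }
        /* Mispredict: jump over the inlined body to the real invoke */
        return slowPathInvoke(receiver, info->method);
    }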
diff --git a/vm/compiler/CompilerIR.h b/vm/compiler/CompilerIR.h
index 2bf243d..21aadec 100644
--- a/vm/compiler/CompilerIR.h
+++ b/vm/compiler/CompilerIR.h
@@ -54,9 +54,11 @@
kChainingCellGap,
/* Don't insert new fields between Gap and Last */
kChainingCellLast = kChainingCellGap + 1,
- kEntryBlock,
+ kMethodEntryBlock,
+ kTraceEntryBlock,
kDalvikByteCode,
- kExitBlock,
+ kTraceExitBlock,
+ kMethodExitBlock,
kPCReconstruction,
kExceptionHandling,
} BBType;
@@ -82,6 +84,7 @@
kMirOpNullNRangeDownCheck,
kMirOpLowerBound,
kMirOpPunt,
+ kMirOpCheckInlinePrediction, // Gen checks for predicted inlining
kMirOpLast,
};
@@ -92,12 +95,24 @@
kMIRNullCheckOnly,
kMIRIgnoreRangeCheck,
kMIRRangeCheckOnly,
+ kMIRInlined, // Invoke is inlined (i.e., dead)
+ kMIRInlinedPred, // Invoke is inlined via prediction
+ kMIRCallee, // Instruction is inlined from callee
} MIROptimizationFlagPositons;
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY (1 << kMIRRangeCheckOnly)
+#define MIR_INLINED (1 << kMIRInlined)
+#define MIR_INLINED_PRED (1 << kMIRInlinedPred)
+#define MIR_CALLEE (1 << kMIRCallee)
+
+typedef struct CallsiteInfo {
+ const ClassObject *clazz;
+ const Method *method;
+ LIR *misPredBranchOver;
+} CallsiteInfo;
typedef struct MIR {
DecodedInstruction dalvikInsn;
@@ -108,6 +123,12 @@
struct SSARepresentation *ssaRep;
int OptimizationFlags;
int seqNum;
+ union {
+ // Used by the inlined insn from the callee to find the mother method
+ const Method *calleeMethod;
+ // Used by the inlined invoke to find the class and method pointers
+ CallsiteInfo *callsiteInfo;
+ } meta;
} MIR;
struct BasicBlockDataFlow;
@@ -119,6 +140,7 @@
const Method *containingMethod; // For blocks from the callee
BBType blockType;
bool needFallThroughBranch; // For blocks ended due to length limit
+ bool isFallThroughFromInvoke; // True means the block needs alignment
MIR *firstMIRInsn;
MIR *lastMIRInsn;
struct BasicBlock *fallThrough;
@@ -150,8 +172,10 @@
bool allSingleStep;
bool halveInstCount;
bool executionCount; // Add code to count trace executions
- bool hasLoop;
+ bool hasLoop; // Contains a loop
+ bool hasInvoke; // Contains an invoke instruction
bool heapMemOp; // Mark mem ops for self verification
+ bool wholeMethod; // Trace spans the whole method (method-based mode, not in use yet)
int numChainingCells[kChainingCellGap];
LIR *firstChainingLIR[kChainingCellGap];
LIR *chainingCellBottom;
@@ -196,6 +220,8 @@
void dvmCompilerPrependMIR(BasicBlock *bb, MIR *mir);
+void dvmCompilerInsertMIRAfter(BasicBlock *bb, MIR *currentMIR, MIR *newMIR);
+
void dvmCompilerAppendLIR(CompilationUnit *cUnit, LIR *lir);
void dvmCompilerInsertLIRBefore(LIR *currentLIR, LIR *newLIR);
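
A plausible body for the new dvmCompilerInsertMIRAfter() helper,
assuming MIR nodes are doubly linked through prev/next fields (those
fields sit in the elided middle of the MIR struct above); a sketch,
not part of this patch:

    void dvmCompilerInsertMIRAfter(BasicBlock *bb, MIR *currentMIR,
                                   MIR *newMIR)
    {
        newMIR->prev = currentMIR;
        newMIR->next = currentMIR->next;
        currentMIR->next = newMIR;

        if (newMIR->next) {
            /* Not the last MIR in the block: fix the back link */
            newMIR->next->prev = newMIR;
        } else {
            /* Inserted at the end: update the block's tail pointer */
            bb->lastMIRInsn = newMIR;
        }
    }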