Make the primary JIT table growable. Also include a change suggested earlier by Dan to use a pre-defined mask in the hash function, and reduce the default JitTable size from 2048 entries to 512.
Update per Ben's comments.
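
As background for review, here is a minimal standalone sketch of the two
mechanisms this change combines: hashing the Dalvik PC against a
pre-computed mask instead of re-deriving the mask on every lookup, and
doubling the table once it is more than 3/4 full. Illustration only, not
part of the patch; the *Demo names are hypothetical, and uintptr_t stands
in for the direct cast used in the 32-bit ARM VM code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u4;
    typedef uint16_t u2;

    /* Same bit mixing as dvmJitHashMask(): XOR the address with itself
     * shifted right 12, drop the low bit, mask down to a bucket index. */
    static u4 hashMaskDemo(const u2* p, u4 mask)
    {
        u4 addr = (u4)(uintptr_t)p;
        return (((addr >> 12) ^ addr) >> 1) & mask;
    }

    int main(void)
    {
        u4 jitTableSize = 512;          /* new default (was 2048) */
        u4 jitTableEntriesUsed = 400;

        /* The mask trick requires a power-of-2 table size. */
        assert((jitTableSize & (jitTableSize - 1)) == 0);

        /* Compiler-thread trigger: grow when more than 3/4 full. */
        if (jitTableEntriesUsed > jitTableSize - jitTableSize / 4)
            printf("resize %u -> %u entries\n",
                   jitTableSize, jitTableSize * 2);

        printf("bucket = %u\n",
               hashMaskDemo((const u2*)(uintptr_t)0x40001234,
                            jitTableSize - 1));
        return 0;
    }

With the 512-entry default the trigger fires at 385 live entries, so a hot
process resizes to 1024 long before collision chains get expensive.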
diff --git a/vm/Globals.h b/vm/Globals.h
index 2c51a05..147bb22 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -654,7 +654,13 @@
unsigned char *pProfTableCopy;
/* Size of JIT hash table in entries. Must be a power of 2 */
- unsigned int maxTableEntries;
+ unsigned int jitTableSize;
+
+ /* Mask used in hash function for JitTable. Should be jitTableSize-1 */
+ unsigned int jitTableMask;
+
+ /* How many entries in the JitEntryTable are in use */
+ unsigned int jitTableEntriesUsed;
/* Trigger for trace selection */
unsigned short threshold;
diff --git a/vm/Init.c b/vm/Init.c
index 5295f49..7486ad9 100644
--- a/vm/Init.c
+++ b/vm/Init.c
@@ -1011,7 +1011,8 @@
* frameworks/base/core/jni/AndroidRuntime.cpp
*/
gDvmJit.blockingMode = false;
- gDvmJit.maxTableEntries = 2048;
+ gDvmJit.jitTableSize = 512;
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
gDvmJit.threshold = 200;
#else
gDvm.executionMode = kExecutionModeInterpFast;
diff --git a/vm/Thread.h b/vm/Thread.h
index fef8b24..f1e5651 100644
--- a/vm/Thread.h
+++ b/vm/Thread.h
@@ -255,6 +255,9 @@
SUSPEND_FOR_DEBUG_EVENT,
SUSPEND_FOR_STACK_DUMP,
SUSPEND_FOR_DEX_OPT,
+#if defined(WITH_JIT)
+ SUSPEND_FOR_JIT,
+#endif
} SuspendCause;
void dvmSuspendThread(Thread* thread);
void dvmSuspendSelf(bool jdwpActivity);
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index 98a4e1d..0b5c211 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -123,6 +123,11 @@
dvmUnlockMutex(&gDvmJit.compilerLock);
/* Check whether there is a suspend request on me */
dvmCheckSuspendPending(NULL);
+ /* Is the JitTable more than 3/4 full? If so, double its size. */
+ if (gDvmJit.jitTableEntriesUsed >
+ (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
+ dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
+ }
if (gDvmJit.haltCompilerThread) {
LOGD("Compiler shutdown in progress - discarding request");
} else {
diff --git a/vm/compiler/codegen/armv5te/Assemble.c b/vm/compiler/codegen/armv5te/Assemble.c
index 12cb158..f874f48 100644
--- a/vm/compiler/codegen/armv5te/Assemble.c
+++ b/vm/compiler/codegen/armv5te/Assemble.c
@@ -583,7 +583,7 @@
if (gDvmJit.pJitEntryTable != NULL) {
COMPILER_TRACE_CHAINING(LOGD("Jit Runtime: unchaining all"));
dvmLockMutex(&gDvmJit.tableLock);
- for (i = 0; i < gDvmJit.maxTableEntries; i++) {
+ for (i = 0; i < gDvmJit.jitTableSize; i++) {
if (gDvmJit.pJitEntryTable[i].dPC &&
gDvmJit.pJitEntryTable[i].codeAddress) {
u4* lastAddress;
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
index 6c67cb5..cbb78f0 100644
--- a/vm/interp/InterpDefs.h
+++ b/vm/interp/InterpDefs.h
@@ -122,7 +122,6 @@
/*
* Local copies of field from gDvm placed here for fast access
*/
- struct JitEntry* pJitTable;
unsigned char* pJitProfTable;
JitState jitState;
void* jitResume;
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index dd94af3..031d46d 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -65,17 +65,17 @@
dvmInitMutex(&gDvmJit.tableLock);
if (res && gDvm.executionMode == kExecutionModeJit) {
struct JitEntry *pJitTable = NULL;
- int tableSize = sizeof(*pJitTable) * gDvmJit.maxTableEntries;
unsigned char *pJitProfTable = NULL;
+ assert(gDvmJit.jitTableSize &&
+ !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1))); // Power of 2?
dvmLockMutex(&gDvmJit.tableLock);
- assert(sizeof(*pJitTable) == 12);
- pJitTable = (struct JitEntry*)malloc(tableSize);
+ pJitTable = (struct JitEntry*)
+ calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
if (!pJitTable) {
LOGE("jit table allocation failed\n");
res = false;
goto done;
}
- memset(pJitTable,0,tableSize);
/*
* NOTE: the profile table must only be allocated once, globally.
* Profiling is turned on and off by nulling out gDvm.pJitProfTable
@@ -91,8 +91,8 @@
goto done;
}
memset(pJitProfTable,0,JIT_PROF_SIZE);
- for (i=0; i < gDvmJit.maxTableEntries; i++) {
- pJitTable[i].chain = gDvmJit.maxTableEntries;
+ for (i=0; i < gDvmJit.jitTableSize; i++) {
+ pJitTable[i].chain = gDvmJit.jitTableSize;
}
/* Is chain field wide enough for termination pattern? */
- assert(pJitTable[0].chain == gDvmJit.maxTableEntries);
+ assert(pJitTable[0].chain == gDvmJit.jitTableSize);
@@ -100,6 +100,8 @@
done:
gDvmJit.pJitEntryTable = pJitTable;
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+ gDvmJit.jitTableEntriesUsed = 0;
gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
dvmUnlockMutex(&gDvmJit.tableLock);
}
@@ -155,13 +157,13 @@
int chains;
if (gDvmJit.pJitEntryTable) {
for (i=0, chains=hit=not_hit=0;
- i < (int) gDvmJit.maxTableEntries;
+ i < (int) gDvmJit.jitTableSize;
i++) {
if (gDvmJit.pJitEntryTable[i].dPC != 0)
hit++;
else
not_hit++;
- if (gDvmJit.pJitEntryTable[i].chain != gDvmJit.maxTableEntries)
+ if (gDvmJit.pJitEntryTable[i].chain != gDvmJit.jitTableSize)
chains++;
}
LOGD(
@@ -354,7 +356,7 @@
if (gDvmJit.pJitEntryTable[idx].dPC == pc)
return &gDvmJit.pJitEntryTable[idx];
else {
- int chainEndMarker = gDvmJit.maxTableEntries;
+ int chainEndMarker = gDvmJit.jitTableSize;
while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
idx = gDvmJit.pJitEntryTable[idx].chain;
if (gDvmJit.pJitEntryTable[idx].dPC == pc)
@@ -364,6 +366,46 @@
return NULL;
}
+
+struct JitEntry *dvmFindJitEntry(const u2* pc)
+{
+ return findJitEntry(pc);
+}
+
+/*
+ * Allocate an entry in a JitTable. Assumes the caller holds the lock,
+ * if applicable. Normally used during table resizing. Asserts (dies)
+ * if the entry already exists in the table or if the table is full.
+ */
+static struct JitEntry *allocateJitEntry(const u2* pc, struct JitEntry *table,
+ u4 size)
+{
+ struct JitEntry *p;
+ unsigned int idx;
+ unsigned int prev;
+ idx = dvmJitHashMask(pc, size-1);
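+ /* Walk the collision chain from the hash bucket; chain == size marks the end */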
+ while ((table[idx].chain != size) && (table[idx].dPC != pc)) {
+ idx = table[idx].chain;
+ }
+ assert(table[idx].dPC != pc); /* Must not already be present */
+ if (table[idx].dPC == NULL) {
+ /* use this slot */
+ return &table[idx];
+ }
+ /* Find a free entry and chain it in */
+ prev = idx;
+ while (true) {
+ idx++;
+ if (idx == size)
+ idx = 0; /* Wraparound */
+ if ((table[idx].dPC == NULL) || (idx == prev))
+ break;
+ }
+ assert(idx != prev);
+ table[prev].chain = idx;
+ assert(table[idx].dPC == NULL);
+ return &table[idx];
+}
+
/*
* If a translated code address exists for the Dalvik byte code
* pointer, return it. This routine needs to be fast.
@@ -384,7 +426,7 @@
#endif
return gDvmJit.pJitEntryTable[idx].codeAddress;
} else {
- int chainEndMarker = gDvmJit.maxTableEntries;
+ int chainEndMarker = gDvmJit.jitTableSize;
while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
idx = gDvmJit.pJitEntryTable[idx].chain;
if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
@@ -444,7 +486,7 @@
resetProfileCounts();
res = true; /* Stale profile - abort */
} else if (interpState->jitState == kJitTSelectRequest) {
- u4 chainEndMarker = gDvmJit.maxTableEntries;
+ u4 chainEndMarker = gDvmJit.jitTableSize;
u4 idx = dvmJitHash(interpState->pc);
/* Walk the bucket chain to find an exact match for our PC */
@@ -503,6 +545,7 @@
if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
/* Allocate the slot */
gDvmJit.pJitEntryTable[idx].dPC = interpState->pc;
+ gDvmJit.jitTableEntriesUsed++;
} else {
/*
* Table is full. We could resize it, but that would
@@ -546,4 +589,68 @@
return res;
}
+
+/*
+ * Resizes the JitTable. The requested size must be a power of 2 and
+ * strictly larger than the current size; returns true on failure.
+ * Suspends all threads while rehashing, so this is a heavyweight
+ * operation.
+ */
+bool dvmJitResizeJitTable( unsigned int size )
+{
+ struct JitEntry *pNewTable;
+ u4 newMask;
+ unsigned int i;
+
+ assert(gDvmJit.pJitEntryTable != NULL);
+ assert(size && !(size & (size - 1))); /* Is power of 2? */
+
+ LOGD("Jit: resizing JitTable from %u to %u", gDvmJit.jitTableSize, size);
+
+ newMask = size - 1;
+
+ if (size <= gDvmJit.jitTableSize) {
+ return true;
+ }
+
+ pNewTable = (struct JitEntry*)calloc(size, sizeof(*pNewTable));
+ if (pNewTable == NULL) {
+ return true;
+ }
+ for (i=0; i < size; i++) {
+ pNewTable[i].chain = size; /* Initialize chain termination */
+ }
+
+ /* Stop all other interpreting/jit'ng threads */
+ dvmSuspendAllThreads(SUSPEND_FOR_JIT);
+
+ /*
+ * At this point, only the compiler thread may be in contention
+ * for the jitEntryTable (it is not affected by the thread suspension).
+ * Acquire the lock.
+ */
+
+ dvmLockMutex(&gDvmJit.tableLock);
+
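+ /* Rehash all live entries from the old table into the new one */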
+ for (i=0; i < gDvmJit.jitTableSize; i++) {
+ if (gDvmJit.pJitEntryTable[i].dPC) {
+ struct JitEntry *p;
+ p = allocateJitEntry(gDvmJit.pJitEntryTable[i].dPC,
+ pNewTable, size);
+ p->dPC = gDvmJit.pJitEntryTable[i].dPC;
+ p->codeAddress = gDvmJit.pJitEntryTable[i].codeAddress;
+ }
+ }
+
+ free(gDvmJit.pJitEntryTable);
+ gDvmJit.pJitEntryTable = pNewTable;
+ gDvmJit.jitTableSize = size;
+ gDvmJit.jitTableMask = newMask;
+
+ dvmUnlockMutex(&gDvmJit.tableLock);
+
+ /* Restart the world */
+ dvmResumeAllThreads(SUSPEND_FOR_JIT);
+
+ return false;
+}
+
#endif /* WITH_JIT */
diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h
index 854b13e..5d748d5 100644
--- a/vm/interp/Jit.h
+++ b/vm/interp/Jit.h
@@ -28,14 +28,17 @@
/*
* JitTable hash function.
*/
-static inline u4 dvmJitHash( const u2* p ) {
- /*
- * TODO - Might make sense to keep "maxTableEntries - 1" as its own
- * variable for speed reasons.
- */
- return ((((u4)p>>12)^(u4)p)>>1) & (gDvmJit.maxTableEntries-1);
+
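+/*
+ * XOR the Dalvik PC with itself shifted right 12 bits, discard the
+ * low bit, and mask the result down to a table index.
+ */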
+static inline u4 dvmJitHashMask( const u2* p, u4 mask ) {
+ return ((((u4)p>>12)^(u4)p)>>1) & (mask);
}
+static inline u4 dvmJitHash( const u2* p ) {
+ return dvmJitHashMask( p, gDvmJit.jitTableMask );
+}
+
/*
* Entries in the JIT's address lookup hash table.
* with assembly hash function in mterp.
@@ -56,7 +59,10 @@
void dvmJitSetCodeAddr(const u2* dPC, void *nPC);
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState);
void* dvmJitChain(void* tgtAddr, u4* branchAddr);
-void dvmJitStopTranslationRequests();
-void dvmJitStats();
+void dvmJitStopTranslationRequests(void);
+void dvmJitStats(void);
+bool dvmJitResizeJitTable(unsigned int size);
+struct JitEntry *dvmFindJitEntry(const u2* pc);
+
#endif /*_DALVIK_INTERP_JIT*/
diff --git a/vm/mterp/Mterp.c b/vm/mterp/Mterp.c
index 80a7b2b..60e2de8 100644
--- a/vm/mterp/Mterp.c
+++ b/vm/mterp/Mterp.c
@@ -78,7 +78,6 @@
glue->interpStackEnd = self->interpStackEnd;
glue->pSelfSuspendCount = &self->suspendCount;
#if defined(WITH_JIT)
- glue->pJitTable = gDvmJit.pJitEntryTable;
glue->pJitProfTable = gDvmJit.pProfTable;
#endif
#if defined(WITH_DEBUGGER)
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index f4df342..295b5d4 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -69,7 +69,6 @@
beq 2f
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -111,7 +110,6 @@
beq toInterpreter @ go if not, otherwise do chain
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -175,7 +173,6 @@
mov r0,rPC
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
- ldrne rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
beq common_selectTrace
bxne r0 @ jump to the translation
common_selectTrace:
@@ -184,9 +181,6 @@
mov r1,#1 @ set changeInterp
b common_gotoBail
-.LdvmCompilerTemplateStart:
- .word dvmCompilerTemplateStart
-
#endif
/*
@@ -542,14 +536,11 @@
mov rPC, r9 @ publish new rPC
str r1, [rGLUE, #offGlue_methodClassDex]
cmp r3, #0 @ caller is compiled code
- bne 1f
+ blxne r3
GET_INST_OPCODE(ip) @ extract opcode from rINST
cmp r0,#0
bne common_updateProfile
GOTO_OPCODE(ip) @ jump to next instruction
-1:
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
- blx r3
#else
GET_INST_OPCODE(ip) @ extract opcode from rINST
mov rPC, r9 @ publish new rPC
diff --git a/vm/mterp/armv5te/header.S b/vm/mterp/armv5te/header.S
index e5a82c2..e129b15 100644
--- a/vm/mterp/armv5te/header.S
+++ b/vm/mterp/armv5te/header.S
@@ -178,10 +178,7 @@
#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
#if defined(WITH_JIT)
-/*
- * Null definition for overhead measuring purposes
- */
-#define GET_JIT_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitTable]
+#define GET_JIT_ENABLED(_reg) ldr _reg,[rGLUE,#offGlue_jitEnabled]
#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable]
#endif
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index b4bb1c2..5c37af6 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -102,41 +102,37 @@
MTERP_OFFSET(offGlue_pActiveProfilers, MterpGlue, pActiveProfilers, 44)
MTERP_OFFSET(offGlue_entryPoint, MterpGlue, entryPoint, 48)
#if defined(WITH_JIT)
-MTERP_OFFSET(offGlue_pJitTable, MterpGlue, pJitTable, 56)
-MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 60)
-MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 64)
-MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 68)
-MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 72)
+MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 56)
+MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 60)
+MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 64)
+MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 68)
#endif
#elif defined(WITH_DEBUGGER)
MTERP_OFFSET(offGlue_pDebuggerActive, MterpGlue, pDebuggerActive, 40)
MTERP_OFFSET(offGlue_entryPoint, MterpGlue, entryPoint, 44)
#if defined(WITH_JIT)
-MTERP_OFFSET(offGlue_pJitTable, MterpGlue, pJitTable, 52)
-MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 56)
-MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 60)
-MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 64)
-MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 68)
+MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 52)
+MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 56)
+MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 60)
+MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 64)
#endif
#elif defined(WITH_PROFILER)
MTERP_OFFSET(offGlue_pActiveProfilers, MterpGlue, pActiveProfilers, 40)
MTERP_OFFSET(offGlue_entryPoint, MterpGlue, entryPoint, 44)
#if defined(WITH_JIT)
-MTERP_OFFSET(offGlue_pJitTable, MterpGlue, pJitTable, 52)
-MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 56)
-MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 60)
-MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 64)
-MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 68)
-#endif
-#else
-MTERP_OFFSET(offGlue_entryPoint, MterpGlue, entryPoint, 40)
-#if defined(WITH_JIT)
-MTERP_OFFSET(offGlue_pJitTable, MterpGlue, pJitTable, 48)
MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 52)
MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 56)
MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 60)
MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 64)
#endif
+#else
+MTERP_OFFSET(offGlue_entryPoint, MterpGlue, entryPoint, 40)
+#if defined(WITH_JIT)
+MTERP_OFFSET(offGlue_pJitProfTable, MterpGlue, pJitProfTable, 48)
+MTERP_OFFSET(offGlue_jitState, MterpGlue, jitState, 52)
+MTERP_OFFSET(offGlue_jitResume, MterpGlue, jitResume, 56)
+MTERP_OFFSET(offGlue_jitResumePC, MterpGlue, jitResumePC, 60)
+#endif
#endif
/* make sure all JValue union members are stored at the same offset */
MTERP_OFFSET(offGlue_retval_z, MterpGlue, retval.z, 8)
@@ -268,4 +264,3 @@
/* opcode number */
MTERP_CONSTANT(OP_MOVE_EXCEPTION, 0x0d)
-
diff --git a/vm/mterp/out/InterpAsm-armv4t.S b/vm/mterp/out/InterpAsm-armv4t.S
index b3c2dc1..5cf4a05 100644
--- a/vm/mterp/out/InterpAsm-armv4t.S
+++ b/vm/mterp/out/InterpAsm-armv4t.S
@@ -185,10 +185,7 @@
#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
#if defined(WITH_JIT)
-/*
- * Null definition for overhead measuring purposes
- */
-#define GET_JIT_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitTable]
+#define GET_JIT_ENABLED(_reg) ldr _reg,[rGLUE,#offGlue_jitEnabled]
#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable]
#endif
@@ -9551,7 +9548,6 @@
beq 2f
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9593,7 +9589,6 @@
beq toInterpreter @ go if not, otherwise do chain
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9657,7 +9652,6 @@
mov r0,rPC
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
- ldrne rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
beq common_selectTrace
bxne r0 @ jump to the translation
common_selectTrace:
@@ -9666,9 +9660,6 @@
mov r1,#1 @ set changeInterp
b common_gotoBail
-.LdvmCompilerTemplateStart:
- .word dvmCompilerTemplateStart
-
#endif
/*
@@ -10024,14 +10015,11 @@
mov rPC, r9 @ publish new rPC
str r1, [rGLUE, #offGlue_methodClassDex]
cmp r3, #0 @ caller is compiled code
- bne 1f
+ blxne r3
GET_INST_OPCODE(ip) @ extract opcode from rINST
cmp r0,#0
bne common_updateProfile
GOTO_OPCODE(ip) @ jump to next instruction
-1:
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
- blx r3
#else
GET_INST_OPCODE(ip) @ extract opcode from rINST
mov rPC, r9 @ publish new rPC
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index bd30e90..3fb16ff 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -185,10 +185,7 @@
#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
#if defined(WITH_JIT)
-/*
- * Null definition for overhead measuring purposes
- */
-#define GET_JIT_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitTable]
+#define GET_JIT_ENABLED(_reg) ldr _reg,[rGLUE,#offGlue_jitEnabled]
#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable]
#endif
@@ -9065,7 +9062,6 @@
beq 2f
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9107,7 +9103,6 @@
beq toInterpreter @ go if not, otherwise do chain
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9171,7 +9166,6 @@
mov r0,rPC
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
- ldrne rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
beq common_selectTrace
bxne r0 @ jump to the translation
common_selectTrace:
@@ -9180,9 +9174,6 @@
mov r1,#1 @ set changeInterp
b common_gotoBail
-.LdvmCompilerTemplateStart:
- .word dvmCompilerTemplateStart
-
#endif
/*
@@ -9538,14 +9529,11 @@
mov rPC, r9 @ publish new rPC
str r1, [rGLUE, #offGlue_methodClassDex]
cmp r3, #0 @ caller is compiled code
- bne 1f
+ blxne r3
GET_INST_OPCODE(ip) @ extract opcode from rINST
cmp r0,#0
bne common_updateProfile
GOTO_OPCODE(ip) @ jump to next instruction
-1:
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
- blx r3
#else
GET_INST_OPCODE(ip) @ extract opcode from rINST
mov rPC, r9 @ publish new rPC
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index b3db103..2b3090d 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -185,10 +185,7 @@
#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
#if defined(WITH_JIT)
-/*
- * Null definition for overhead measuring purposes
- */
-#define GET_JIT_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitTable]
+#define GET_JIT_ENABLED(_reg) ldr _reg,[rGLUE,#offGlue_jitEnabled]
#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable]
#endif
@@ -9545,7 +9542,6 @@
beq 2f
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9587,7 +9583,6 @@
beq toInterpreter @ go if not, otherwise do chain
mov r1,rINST
bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
cmp r0,#0 @ successful chain?
bxne r0 @ continue native execution
b toInterpreter @ didn't chain - resume with interpreter
@@ -9651,7 +9646,6 @@
mov r0,rPC
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
- ldrne rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
beq common_selectTrace
bxne r0 @ jump to the translation
common_selectTrace:
@@ -9660,9 +9654,6 @@
mov r1,#1 @ set changeInterp
b common_gotoBail
-.LdvmCompilerTemplateStart:
- .word dvmCompilerTemplateStart
-
#endif
/*
@@ -10018,14 +10009,11 @@
mov rPC, r9 @ publish new rPC
str r1, [rGLUE, #offGlue_methodClassDex]
cmp r3, #0 @ caller is compiled code
- bne 1f
+ blxne r3
GET_INST_OPCODE(ip) @ extract opcode from rINST
cmp r0,#0
bne common_updateProfile
GOTO_OPCODE(ip) @ jump to next instruction
-1:
- ldr rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
- blx r3
#else
GET_INST_OPCODE(ip) @ extract opcode from rINST
mov rPC, r9 @ publish new rPC