Tighten the safe points at which code cache resets can happen.
Add a new flag to the Thread struct to track the whereabouts of the top frame
in each Java thread. It is not safe to blow away the code cache while any
thread is still executing in JIT'ed code.
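
Below is a minimal, self-contained sketch (plain C, compilable on its own) of
the safe-point check this change adds to resetCodeCache(): every thread is
scanned for a non-NULL inJitCodeCache flag, and the reset is postponed via
delayCodeCacheReset if any thread is still in JIT'ed code. The Thread and
global structures here are trimmed stand-ins for illustration only, not the
real Dalvik definitions.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Thread {
        void *inJitCodeCache;     /* non-NULL => top frame is in JIT'ed code */
        struct Thread *next;
    } Thread;

    static struct {
        Thread *threadList;
        int delayCodeCacheReset;  /* countdown before retrying the reset */
    } gDvmSketch;                 /* illustrative stand-in for gDvm/gDvmJit */

    /* Returns true if the code cache may be reset right now. */
    static bool safeToResetCodeCache(void)
    {
        Thread *thread;
        int inJit = 0;

        /* With all threads suspended, look for anyone still in JIT land */
        for (thread = gDvmSketch.threadList; thread != NULL;
             thread = thread->next) {
            if (thread->inJitCodeCache != NULL)
                inJit++;
        }

        if (inJit) {
            /* Back off and let the busy threads return to the interpreter */
            gDvmSketch.delayCodeCacheReset = 256;
            return false;
        }
        return true;
    }

    int main(void)
    {
        Thread interpThread = { NULL, NULL };
        Thread jitThread = { (void *) 0x1, &interpThread };

        gDvmSketch.threadList = &jitThread;
        printf("safe to reset: %s\n", safeToResetCodeCache() ? "yes" : "no");

        jitThread.inJitCodeCache = NULL;  /* thread left the code cache */
        printf("safe to reset: %s\n", safeToResetCodeCache() ? "yes" : "no");
        return 0;
    }
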
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index bf44467..3886cce 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -59,9 +59,12 @@
dvmLockMutex(&gDvmJit.compilerLock);
- /* Queue full */
- if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
- gDvmJit.codeCacheFull == true) {
+ /*
+ * Return if the work queue is full.
+ * If the code cache is full, we still allow the work order to be added
+ * and use it to trigger a code cache reset.
+ */
+ if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE) {
result = false;
goto done;
}
@@ -128,6 +131,9 @@
return false;
}
+ // For debugging only
+ // LOGD("Code cache starts at %p", gDvmJit.codeCache);
+
/* Copy the template code into the beginning of the code cache */
int templateSize = (intptr_t) dmvCompilerTemplateEnd -
(intptr_t) dvmCompilerTemplateStart;
@@ -144,45 +150,101 @@
return true;
}
+static void crawlDalvikStack(Thread *thread, bool print)
+{
+ void *fp = thread->curFrame;
+ StackSaveArea* saveArea = NULL;
+ int stackLevel = 0;
+
+ if (print) {
+ LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
+ dvmGetThreadStatusStr(thread->status),
+ thread->inJitCodeCache,
+ thread->inJitCodeCache ? "jit" : "interp");
+ }
+ /* Crawl the Dalvik stack frames to clear the returnAddr field */
+ while (fp != NULL) {
+ saveArea = SAVEAREA_FROM_FP(fp);
+
+ if (print) {
+ if (dvmIsBreakFrame(fp)) {
+ LOGD(" #%d: break frame (%p)",
+ stackLevel, saveArea->returnAddr);
+ }
+ else {
+ LOGD(" #%d: %s.%s%s (%p)",
+ stackLevel,
+ saveArea->method->clazz->descriptor,
+ saveArea->method->name,
+ dvmIsNativeMethod(saveArea->method) ?
+ " (native)" : "",
+ saveArea->returnAddr);
+ }
+ }
+ stackLevel++;
+ saveArea->returnAddr = NULL;
+ assert(fp != saveArea->prevFrame);
+ fp = saveArea->prevFrame;
+ }
+ /* Make sure the stack is fully unwound to the bottom */
+ assert(saveArea == NULL ||
+ (u1 *) (saveArea+1) == thread->interpStackStart);
+}
+
static void resetCodeCache(void)
{
- Thread* self = dvmThreadSelf();
Thread* thread;
+ u8 startTime = dvmGetRelativeTimeUsec();
+ int inJit = 0;
- LOGD("Reset the JIT code cache (%d bytes used)", gDvmJit.codeCacheByteUsed);
+ LOGD("Reset the JIT code cache (%d bytes used / %d time(s))",
+ gDvmJit.codeCacheByteUsed, ++gDvmJit.numCodeCacheReset);
/* Stop the world */
dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
+ /* If any thread is found stuck in the JIT state, don't reset the cache */
+ for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ if (thread->inJitCodeCache) {
+ inJit++;
+ /*
+ * STOPSHIP
+ * Change the verbose mode to false after the new code receives
+ * more QA love.
+ */
+ crawlDalvikStack(thread, true);
+ }
+ }
+
+ if (inJit) {
+ /* Wait a while for the busy threads to rest and try again */
+ gDvmJit.delayCodeCacheReset = 256;
+ goto done;
+ }
+
+ /* Drain the work queue to free the work order */
+ while (workQueueLength()) {
+ CompilerWorkOrder work = workDequeue();
+ free(work.info);
+ }
+
/* Wipe out the returnAddr field that soon will point to stale code */
for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
- if (thread == self)
- continue;
-
- /* Crawl the Dalvik stack frames */
- StackSaveArea *ssaPtr = ((StackSaveArea *) thread->curFrame) - 1;
- while (ssaPtr != ((StackSaveArea *) NULL) - 1) {
- ssaPtr->returnAddr = NULL;
- ssaPtr = ((StackSaveArea *) ssaPtr->prevFrame) - 1;
- };
+ crawlDalvikStack(thread, false);
}
/* Reset the JitEntry table contents to the initial unpopulated state */
dvmJitResetTable();
-#if 0
/*
- * Uncomment the following code when testing/debugging.
- *
* Wipe out the code cache content to force immediate crashes if
* stale JIT'ed code is invoked.
*/
- memset(gDvmJit.codeCache,
- (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed,
- 0);
+ memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
+ 0,
+ gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
cacheflush((intptr_t) gDvmJit.codeCache,
(intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);
-#endif
/* Reset the current mark of used bytes to the end of template code */
gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
@@ -197,6 +259,10 @@
/* All clear now */
gDvmJit.codeCacheFull = false;
+ LOGD("Code cache reset takes %lld usec",
+ dvmGetRelativeTimeUsec() - startTime);
+
+done:
/* Resume all threads */
dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
}
@@ -263,7 +329,15 @@
*/
#if 0
if (gDvmJit.codeCacheFull == true) {
- resetCodeCache();
+ if (gDvmJit.delayCodeCacheReset == 0) {
+ resetCodeCache();
+ assert(workQueueLength() == 0 ||
+ gDvmJit.delayCodeCacheReset != 0);
+ } else {
+ LOGD("Delay the next %d tries to reset code cache",
+ gDvmJit.delayCodeCacheReset);
+ gDvmJit.delayCodeCacheReset--;
+ }
}
#endif
} while (workQueueLength() != 0);
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index 7c16ff9..6a59c7e 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -1464,6 +1464,7 @@
/* Stop the world */
dvmSuspendAllThreads(SUSPEND_FOR_IC_PATCH);
+
COMPILER_TRACE_CHAINING(
LOGD("Jit Runtime: predicted chain %p from %s to %s (%s) patched",
cellDest, cellDest->clazz ? cellDest->clazz->descriptor : "NULL",
@@ -1542,11 +1543,14 @@
case kChainingCellInvokePredicted:
targetOffset = 0;
predChainCell = (PredictedChainingCell *) pChainCells;
- /* Reset the cell to the init state */
- predChainCell->branch = PREDICTED_CHAIN_BX_PAIR_INIT;
+ /*
+ * Another mutator thread may be racing to use this
+ * particular predicted cell and may have already passed
+ * the clazz comparison. So we cannot safely wipe the
+ * method and branch fields, but it is safe to clear the
+ * clazz field, which serves as the key.
+ */
predChainCell->clazz = PREDICTED_CHAIN_CLAZZ_INIT;
- predChainCell->method = PREDICTED_CHAIN_METHOD_INIT;
- predChainCell->counter = PREDICTED_CHAIN_COUNTER_INIT;
break;
#if defined(WITH_SELF_VERIFICATION)
case kChainingCellBackwardBranch:
@@ -1611,6 +1615,7 @@
}
cacheflush((long)lowAddress, (long)highAddress, 0);
dvmUnlockMutex(&gDvmJit.tableLock);
+ gDvmJit.translationChains = 0;
}
}
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index f92e347..1993c9d 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -4103,7 +4103,8 @@
{
bool res;
- if (gDvmJit.codeCacheFull) {
+ if (gDvmJit.codeCacheFull &&
+ (work->kind != kWorkOrderICPatch)) {
return false;
}
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index 20bb3ab..d6e6763 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -16,7 +16,7 @@
SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
add r12, lr, #2 @ setup the punt-to-interp address
sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
- ldr r8, [r8] @ r3<- suspendCount (int)
+ ldr r8, [r8] @ r8<- suspendCount (int)
cmp r10, r9 @ bottom < interpStackEnd?
bxlt r12 @ return to raise stack overflow excep.
@ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 0ecccfa..945203e 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -30,7 +30,9 @@
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ mov r2, #0
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
mov r9, r3 @ r9<- glue->self (preserve)
@@ -55,12 +57,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index 88b38e0..c4f766a 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -13,7 +13,7 @@
sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
- ldr r8, [r8] @ r3<- suspendCount (int)
+ ldr r8, [r8] @ r8<- suspendCount (int)
cmp r10, r9 @ bottom < interpStackEnd?
bxlt lr @ return to raise stack overflow excep.
@ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index 1c85b19..aa9884a 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -35,6 +35,7 @@
str r0, [rGLUE, #offGlue_methodClassDex]
cmp r8, #0 @ check the suspendCount
movne r9, #0 @ clear the chaining cell address
+ str r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
cmp r9, #0 @ chaining cell exists?
blxne r9 @ jump to the chaining cell
#if defined(EXIT_STATS)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index dfbf9a8..c1487dd 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -10,7 +10,9 @@
@ Prep for the native call
@ r1 = newFP, r0 = methodToCall
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ str r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
@@ -36,12 +38,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the new mode
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
/*
@@ -49,6 +55,9 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
+ str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index af487fd..36d3ea1 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -209,6 +209,7 @@
str r0, [rGLUE, #offGlue_methodClassDex]
cmp r8, #0 @ check the suspendCount
movne r9, #0 @ clear the chaining cell address
+ str r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
cmp r9, #0 @ chaining cell exists?
blxne r9 @ jump to the chaining cell
#if defined(EXIT_STATS)
@@ -420,7 +421,9 @@
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ mov r2, #0
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
mov r9, r3 @ r9<- glue->self (preserve)
@@ -445,12 +448,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
@@ -1395,7 +1402,9 @@
@ Prep for the native call
@ r1 = newFP, r0 = methodToCall
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ str r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
@@ -1421,12 +1430,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the new mode
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
/*
@@ -1434,6 +1447,9 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
+ str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 5715b9b..48dd707 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -209,6 +209,7 @@
str r0, [rGLUE, #offGlue_methodClassDex]
cmp r8, #0 @ check the suspendCount
movne r9, #0 @ clear the chaining cell address
+ str r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
cmp r9, #0 @ chaining cell exists?
blxne r9 @ jump to the chaining cell
#if defined(EXIT_STATS)
@@ -420,7 +421,9 @@
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ mov r2, #0
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
mov r9, r3 @ r9<- glue->self (preserve)
@@ -445,12 +448,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
@@ -1120,7 +1127,9 @@
@ Prep for the native call
@ r1 = newFP, r0 = methodToCall
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ str r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
@@ -1146,12 +1155,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the new mode
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
/*
@@ -1159,6 +1172,9 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
+ str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index e3edf7d..469919a 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -209,6 +209,7 @@
str r0, [rGLUE, #offGlue_methodClassDex]
cmp r8, #0 @ check the suspendCount
movne r9, #0 @ clear the chaining cell address
+ str r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
cmp r9, #0 @ chaining cell exists?
blxne r9 @ jump to the chaining cell
#if defined(EXIT_STATS)
@@ -420,7 +421,9 @@
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ mov r2, #0
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
mov r9, r3 @ r9<- glue->self (preserve)
@@ -445,12 +448,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
@@ -1395,7 +1402,9 @@
@ Prep for the native call
@ r1 = newFP, r0 = methodToCall
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
+ str r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache
str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
@ newFp->localRefCookie=top
@@ -1421,12 +1430,16 @@
@ r0 = dalvikCallsitePC
bne .LhandleException @ no, handle exception
+ str r2, [r9, #offThread_inJitCodeCache] @ set the new mode
cmp r2, #0 @ return chaining cell still exists?
bxne r2 @ yes - go ahead
@ continue executing the next instruction through the interpreter
ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S
add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(EXIT_STATS)
+ mov r0, #kCallsiteInterpreted
+#endif
mov pc, r1
/*
@@ -1434,6 +1447,9 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ mov r2, #0
+ str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address