Fix interpreter debug attach
Fix a few miscellaneous bugs from the interpreter restructuring that were
causing a segfault on debugger attach.
Added a sanity checking routine for debugging.
Fixed a problem in which the JIT's threshold and on/off switch
wouldn't get initialized properly on thread creation.
Renamed dvmCompilerStateRefresh() to dvmCompilerUpdateGlobalState() to
better reflect its function.
Change-Id: I5b8af1ce2175e3c6f53cda19dd8e052a5f355587
diff --git a/vm/Debugger.c b/vm/Debugger.c
index 2ebc300..f7e35d9 100644
--- a/vm/Debugger.c
+++ b/vm/Debugger.c
@@ -399,6 +399,9 @@
dvmInitBreakpoints();
gDvm.debuggerActive = true;
dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, true);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
}
/*
@@ -415,6 +418,9 @@
gDvm.debuggerActive = false;
dvmUpdateAllInterpBreak(kInterpDebugBreak, kSubModeDebuggerActive, false);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
dvmHashTableLock(gDvm.dbgRegistry);
gDvm.debuggerConnected = false;
diff --git a/vm/Profile.c b/vm/Profile.c
index 1635d79..6faf726 100644
--- a/vm/Profile.c
+++ b/vm/Profile.c
@@ -220,6 +220,10 @@
// Tell the threads
dvmUpdateAllInterpBreak(newBreak, newMode, enable);
+#if defined(WITH_JIT)
+ dvmCompilerUpdateGlobalState();
+#endif
+
LOGD("+++ active profiler count now %d\n", newValue);
}
@@ -748,7 +752,7 @@
/*
* Register the METHOD_TRACE_EXIT action for the fast interpreter and
* JIT'ed code for JNI methods. The about-to-return JNI callee method is passed
- * in explicitly.
+ * in explicitly. Also used for inline-execute.
*/
void dvmFastNativeMethodTraceExit(const Method* method, Thread* self)
{
diff --git a/vm/SignalCatcher.c b/vm/SignalCatcher.c
index 5a5e037..c8f9833 100644
--- a/vm/SignalCatcher.c
+++ b/vm/SignalCatcher.c
@@ -239,9 +239,10 @@
dvmCompilerDumpStats();
/* Stress-test unchain all */
dvmJitUnchainAll();
- LOGD("Send %d more signals to rest the code cache",
+ LOGD("Send %d more signals to reset the code cache",
codeCacheResetCount & 7);
}
+ dvmCheckInterpStateConsistency();
}
#endif
diff --git a/vm/Thread.c b/vm/Thread.c
index 3d9663d..cdf49bc 100644
--- a/vm/Thread.c
+++ b/vm/Thread.c
@@ -1412,6 +1412,9 @@
newThread->prev = gDvm.threadList;
gDvm.threadList->next = newThread;
+ /* Add any existing global modes to the interpBreak control */
+ dvmInitializeInterpBreak(newThread);
+
if (!dvmGetFieldBoolean(threadObj, gDvm.offJavaLangThread_daemon))
gDvm.nonDaemonThreadCount++; // guarded by thread list lock
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index a485ca5..dfa6589 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -458,7 +458,7 @@
gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
gDvmJit.pProfTableCopy = pJitProfTable;
gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
dvmUnlockMutex(&gDvmJit.tableLock);
/* Signal running threads to refresh their cached pJitTable pointers */
@@ -739,7 +739,7 @@
/* Disable new translation requests */
gDvmJit.pProfTable = NULL;
gDvmJit.pProfTableCopy = NULL;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
if (gDvm.verboseShutdown ||
gDvmJit.profileMode == kTraceProfilingContinuous) {
@@ -774,7 +774,7 @@
*/
}
-void dvmCompilerStateRefresh()
+void dvmCompilerUpdateGlobalState()
{
bool jitActive;
bool jitActivate;
@@ -831,5 +831,5 @@
if (needUnchain)
dvmJitUnchainAll();
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
}
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index 5d5036f..12add75 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -233,7 +233,7 @@
bool isIterative);
void dvmCompilerMethodSSATransformation(struct CompilationUnit *cUnit);
bool dvmCompilerBuildLoop(struct CompilationUnit *cUnit);
-void dvmCompilerStateRefresh(void);
+void dvmCompilerUpdateGlobalState(void);
JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
const struct JitEntry *desc);
void *dvmCompilerGetInterpretTemplate();
diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
index c1792ed..3d85052 100644
--- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
@@ -84,7 +84,7 @@
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
diff --git a/vm/compiler/codegen/arm/armv5te/ArchVariant.c b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
index 817b68a..57a8c8a 100644
--- a/vm/compiler/codegen/arm/armv5te/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
@@ -84,7 +84,7 @@
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
index ff80662..59d7c95 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
@@ -79,7 +79,7 @@
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
index ff80662..59d7c95 100644
--- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
@@ -79,7 +79,7 @@
gDvmJit.disableOpt |= (1 << kMethodJit);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
diff --git a/vm/compiler/codegen/x86/ia32/ArchVariant.c b/vm/compiler/codegen/x86/ia32/ArchVariant.c
index 2abac88..90f14a3 100644
--- a/vm/compiler/codegen/x86/ia32/ArchVariant.c
+++ b/vm/compiler/codegen/x86/ia32/ArchVariant.c
@@ -78,7 +78,7 @@
sizeof(struct JitToInterpEntries)) <= 128);
// Make sure all threads have current values
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
return true;
}
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
index 56027fd..2695483 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S
@@ -13,7 +13,7 @@
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 44accab..58c3ebf 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -1442,7 +1442,7 @@
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 3b5c857..1021481 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -1173,7 +1173,7 @@
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 3905ec8..804b471 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -1442,7 +1442,7 @@
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index b09bc30..d8713d9 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -1442,7 +1442,7 @@
mov r3, #0 @ Record that we're not returning
str r3, [r0, #offThread_inJitCodeCache]
blx r2 @ dvmLockObject(self, obj)
- @ refresh Jit's on/off status & test for exception
+ @ test for exception
ldr r1, [rSELF, #offThread_exception]
cmp r1, #0
beq 1f
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index 98f478e..b4a5ec9 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -596,13 +596,14 @@
/*
* The interpreter just threw. Handle any special subMode requirements.
+ * All interpSave state must be valid on entry.
*/
-void dvmReportExceptionThrow(Thread* self, const Method* curMethod,
- const u2* pc, void* fp)
+void dvmReportExceptionThrow(Thread* self, Object* exception)
{
+ const Method* curMethod = self->interpSave.method;
#if defined(WITH_JIT)
if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
- dvmJitEndTraceSelect(self, pc);
+ dvmJitEndTraceSelect(self, self->interpSave.pc);
}
if (self->interpBreak.ctl.breakFlags & kInterpSingleStep) {
/* Discard any single-step native returns to translation */
@@ -611,17 +612,18 @@
#endif
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
void *catchFrame;
- int offset = pc - curMethod->insns;
- int catchRelPc = dvmFindCatchBlock(self, offset, self->exception,
+ int offset = self->interpSave.pc - curMethod->insns;
+ int catchRelPc = dvmFindCatchBlock(self, offset, exception,
true, &catchFrame);
- dvmDbgPostException(fp, offset, catchFrame, catchRelPc,
- self->exception);
+ dvmDbgPostException(self->interpSave.fp, offset, catchFrame,
+ catchRelPc, exception);
}
}
/*
* The interpreter is preparing to do an invoke (both native & normal).
- * Handle any special subMode requirements.
+ * Handle any special subMode requirements. All interpSave state
+ * must be valid on entry.
*/
void dvmReportInvoke(Thread* self, const Method* methodToCall)
{
@@ -632,10 +634,10 @@
* The interpreter is preparing to do a native invoke. Handle any
* special subMode requirements. NOTE: for a native invoke,
* dvmReportInvoke() and dvmReportPreNativeInvoke() will both
- * be called prior to the invoke.
+ * be called prior to the invoke. All interpSave state must
+ * be valid on entry.
*/
-void dvmReportPreNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall)
+void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self)
{
#if defined(WITH_JIT)
/*
@@ -643,7 +645,7 @@
* builder can't follow into or through a native method.
*/
if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
- dvmCheckJit(pc, self);
+ dvmCheckJit(self->interpSave.pc, self);
}
#endif
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
@@ -656,10 +658,10 @@
/*
* The interpreter has returned from a native invoke. Handle any
- * special subMode requirements.
+ * special subMode requirements. All interpSave state must be
+ * valid on entry.
*/
-void dvmReportPostNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall)
+void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self)
{
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
Object* thisPtr = dvmGetThisPtr(self->interpSave.method,
@@ -674,15 +676,15 @@
/*
* The interpreter has returned from a normal method. Handle any special
- * subMode requirements.
+ * subMode requirements. All interpSave state must be valid on entry.
*/
-void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP)
+void dvmReportReturn(Thread* self)
{
TRACE_METHOD_EXIT(self, self->interpSave.method);
#if defined(WITH_JIT)
- if (dvmIsBreakFrame(prevFP) &&
+ if (dvmIsBreakFrame(self->interpSave.fp) &&
(self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)) {
- dvmCheckJit(pc, self);
+ dvmCheckJit(self->interpSave.pc, self);
}
#endif
}
@@ -711,8 +713,8 @@
* breakpoints. We may be able to speed things up a bit if we don't query
* the event list unless we know there's at least one lurking within.
*/
-void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp,
- bool methodEntry, Thread* self)
+static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
+ bool methodEntry, Thread* self)
{
int eventFlags = 0;
@@ -1552,6 +1554,54 @@
}
/*
+ * Do a sanity check on interpreter state saved to Thread.
+ * A failure here doesn't necessarily mean that something is wrong,
+ * so this code should only be used during development to suggest
+ * a possible problem.
+ */
+void dvmCheckInterpStateConsistency()
+{
+ Thread* self = dvmThreadSelf();
+ Thread* thread;
+ uint8_t breakFlags;
+ uint8_t subMode;
+ void* handlerTable;
+
+ dvmLockThreadList(self);
+ breakFlags = self->interpBreak.ctl.breakFlags;
+ subMode = self->interpBreak.ctl.subMode;
+ handlerTable = self->interpBreak.ctl.curHandlerTable;
+ for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
+ if (subMode != thread->interpBreak.ctl.subMode) {
+ LOGD("Warning: subMode mismatch - 0x%x:0x%x, tid[%d]",
+ subMode,thread->interpBreak.ctl.subMode,thread->threadId);
+ }
+ if (breakFlags != thread->interpBreak.ctl.breakFlags) {
+ LOGD("Warning: breakFlags mismatch - 0x%x:0x%x, tid[%d]",
+ breakFlags,thread->interpBreak.ctl.breakFlags,thread->threadId);
+ }
+ if (handlerTable != thread->interpBreak.ctl.curHandlerTable) {
+ LOGD("Warning: curHandlerTable mismatch - 0x%x:0x%x, tid[%d]",
+ (int)handlerTable,(int)thread->interpBreak.ctl.curHandlerTable,
+ thread->threadId);
+ }
+#if defined(WITH_JIT)
+ if (thread->pJitProfTable != gDvmJit.pProfTable) {
+ LOGD("Warning: pJitProfTable mismatch - 0x%x:0x%x, tid[%d]",
+ (int)thread->pJitProfTable,(int)gDvmJit.pProfTable,
+ thread->threadId);
+ }
+ if (thread->jitThreshold != gDvmJit.threshold) {
+ LOGD("Warning: jitThreshold mismatch - 0x%x:0x%x, tid[%d]",
+ (int)thread->jitThreshold,(int)gDvmJit.threshold,
+ thread->threadId);
+ }
+#endif
+ }
+ dvmUnlockThreadList();
+}
+
+/*
* Arm a safepoint callback for a thread. If funct is null,
* clear any pending callback.
* TODO: only gc is currently using this feature, and will have
@@ -1622,9 +1672,32 @@
self->icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
self->pProfileCountdown = &gDvmJit.profileCountdown;
// Jit state that can change
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateSingle(self);
#endif
+}
+/*
+ * For a newly-created thread, we need to start off with interpBreak
+ * set to any existing global modes. The caller must hold the
+ * thread list lock.
+ */
+void dvmInitializeInterpBreak(Thread* thread)
+{
+ u1 flags = 0;
+ u1 subModes = 0;
+
+ if (gDvm.instructionCountEnableCount > 0) {
+ flags |= kInterpInstCountBreak;
+ subModes |= kSubModeInstCounting;
+ }
+ if (dvmIsMethodTraceActive()) {
+ subModes |= kSubModeMethodTrace;
+ }
+ if (gDvm.debuggerActive) {
+ flags |= kInterpDebugBreak;
+ subModes |= kSubModeDebuggerActive;
+ }
+ dvmUpdateInterpBreak(thread, flags, subModes, true);
}
/*
@@ -1702,8 +1775,8 @@
}
if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
- dvmUpdateDebugger(method, pc, fp,
- self->debugIsMethodEntry, self);
+ updateDebugger(method, pc, fp,
+ self->debugIsMethodEntry, self);
}
if (gDvm.instructionCountEnableCount != 0) {
/*
diff --git a/vm/interp/Interp.h b/vm/interp/Interp.h
index b6919af..91e2262 100644
--- a/vm/interp/Interp.h
+++ b/vm/interp/Interp.h
@@ -77,17 +77,12 @@
/*
* Debugger support
*/
-void dvmUpdateDebugger(const Method* method, const u2* pc, const u4* fp,
- bool methodEntry, Thread* self);
void dvmCheckBefore(const u2 *dPC, u4 *fp, Thread* self);
-void dvmReportExceptionThrow(Thread* self, const Method* curMethod,
- const u2* pc, void* fp);
-void dvmReportPreNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall);
-void dvmReportPostNativeInvoke(const u2* pc, Thread* self,
- const Method* methodToCall);
+void dvmReportExceptionThrow(Thread* self, Object* exception);
+void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self);
+void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self);
void dvmReportInvoke(Thread* self, const Method* methodToCall);
-void dvmReportReturn(Thread* self, const u2* pc, const u4* prevFP);
+void dvmReportReturn(Thread* self);
/*
* Update interpBreak
@@ -95,6 +90,8 @@
void dvmUpdateInterpBreak(Thread* thread, int newBreak, int newMode,
bool enable);
void dvmAddToSuspendCounts(Thread* thread, int delta, int dbgDelta);
+void dvmCheckInterpStateConsistency();
+void dvmInitializeInterpBreak(Thread* thread);
/*
* Update interpBreak for all threads
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index 0274a24..936bbc8 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -440,7 +440,7 @@
* free it because some thread may be holding a reference.
*/
gDvmJit.pProfTable = NULL;
- dvmJitUpdateState();
+ dvmJitUpdateThreadStateAll();
}
#if defined(WITH_JIT_TUNING)
@@ -1479,18 +1479,26 @@
}
/*
+ * Update JIT-specific info in Thread structure for a single thread
+ */
+void dvmJitUpdateThreadStateSingle(Thread* thread)
+{
+ thread->pJitProfTable = gDvmJit.pProfTable;
+ thread->jitThreshold = gDvmJit.threshold;
+}
+
+/*
* Walk through the thread list and refresh all local copies of
* JIT global state (which was placed there for fast access).
*/
-void dvmJitUpdateState()
+void dvmJitUpdateThreadStateAll()
{
Thread* self = dvmThreadSelf();
Thread* thread;
dvmLockThreadList(self);
for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
- thread->pJitProfTable = gDvmJit.pProfTable;
- thread->jitThreshold = gDvmJit.threshold;
+ dvmJitUpdateThreadStateSingle(thread);
}
dvmUnlockThreadList();
diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h
index b7e0f4a..7dbe9ac 100644
--- a/vm/interp/Jit.h
+++ b/vm/interp/Jit.h
@@ -162,7 +162,8 @@
void dvmJitTraceProfilingOn(void);
void dvmJitChangeProfileMode(TraceProfilingModes newState);
void dvmJitDumpTraceDesc(JitTraceDescription *trace);
-void dvmJitUpdateState(void);
+void dvmJitUpdateThreadStateSingle(Thread* thread);
+void dvmJitUpdateThreadStateAll(void);
void dvmJitResumeTranslation(Thread* self, const u2* pc, const u4* fp);
#endif /*_DALVIK_INTERP_JIT*/
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
index 8cd5f78..a77ce12 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
@@ -47,20 +47,20 @@
*/
.L${opcode}_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .L${opcode}_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .L${opcode}_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
@@ -72,6 +72,7 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .L${opcode}_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
@@ -79,13 +80,13 @@
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .L${opcode}_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
index ba19ca5..b5b9c32 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
@@ -65,20 +65,22 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .L${opcode}_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .L${opcode}_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 5d35fde..b262b1e 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -656,6 +656,8 @@
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
@@ -707,9 +709,11 @@
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
@@ -720,9 +724,10 @@
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
@@ -815,10 +820,11 @@
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@@ -924,11 +930,11 @@
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
diff --git a/vm/mterp/c/OP_BREAKPOINT.c b/vm/mterp/c/OP_BREAKPOINT.c
index 0c0cbc8..fa0711e 100644
--- a/vm/mterp/c/OP_BREAKPOINT.c
+++ b/vm/mterp/c/OP_BREAKPOINT.c
@@ -8,7 +8,7 @@
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
diff --git a/vm/mterp/c/gotoTargets.c b/vm/mterp/c/gotoTargets.c
index d3190c1..50ff677 100644
--- a/vm/mterp/c/gotoTargets.c
+++ b/vm/mterp/c/gotoTargets.c
@@ -618,7 +618,8 @@
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
@@ -630,6 +631,7 @@
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
@@ -694,7 +696,8 @@
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
@@ -768,6 +771,7 @@
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
@@ -944,6 +948,7 @@
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
@@ -953,6 +958,7 @@
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
@@ -973,7 +979,8 @@
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -987,7 +994,8 @@
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
diff --git a/vm/mterp/cstubs/stubdefs.c b/vm/mterp/cstubs/stubdefs.c
index 00fb8b6..a894f9e 100644
--- a/vm/mterp/cstubs/stubdefs.c
+++ b/vm/mterp/cstubs/stubdefs.c
@@ -37,6 +37,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 1824d07..77be638 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -12611,20 +12611,20 @@
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
@@ -12636,6 +12636,7 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
@@ -12643,13 +12644,13 @@
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -12696,20 +12697,22 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -26857,6 +26860,8 @@
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
@@ -26908,9 +26913,11 @@
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
@@ -26921,9 +26928,10 @@
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
@@ -27016,10 +27024,11 @@
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@@ -27125,11 +27134,11 @@
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index fc4b638..b46c3ee 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -13069,20 +13069,20 @@
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
@@ -13094,6 +13094,7 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
@@ -13101,13 +13102,13 @@
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -13154,20 +13155,22 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- AA (arg count)
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -27315,6 +27318,8 @@
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
@@ -27366,9 +27371,11 @@
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
@@ -27379,9 +27386,10 @@
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
@@ -27474,10 +27482,11 @@
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@@ -27583,11 +27592,11 @@
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 7132faf..4694cf7 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -12549,20 +12549,20 @@
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
@@ -12574,6 +12574,7 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
@@ -12581,13 +12582,13 @@
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -12634,20 +12635,22 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- AA (arg count)
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -26795,6 +26798,8 @@
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
@@ -26846,9 +26851,11 @@
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
@@ -26859,9 +26866,10 @@
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
@@ -26954,10 +26962,11 @@
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@@ -27063,11 +27072,11 @@
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 450c537..5a291f0 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -12549,20 +12549,20 @@
*/
.LOP_EXECUTE_INLINE_continue:
rsb r0, r0, #4 @ r0<- 4-r0
- FETCH(r9, 2) @ r9<- FEDC
+ FETCH(rINST, 2) @ rINST<- FEDC
add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
bl common_abort @ (skipped due to ARM prefetch)
-4: and ip, r9, #0xf000 @ isolate F
+4: and ip, rINST, #0xf000 @ isolate F
ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
-3: and ip, r9, #0x0f00 @ isolate E
+3: and ip, rINST, #0x0f00 @ isolate E
ldr r2, [rFP, ip, lsr #6] @ r2<- vE
-2: and ip, r9, #0x00f0 @ isolate D
+2: and ip, rINST, #0x00f0 @ isolate D
ldr r1, [rFP, ip, lsr #2] @ r1<- vD
-1: and ip, r9, #0x000f @ isolate C
+1: and ip, rINST, #0x000f @ isolate C
ldr r0, [rFP, ip, lsl #2] @ r0<- vC
0:
- ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
- ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
@ (not reached)
/*
@@ -12574,6 +12574,7 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
@@ -12581,13 +12582,13 @@
mov r0, rINST, lsr #12 @ r0<- B
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, r9 @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -12634,20 +12635,22 @@
bl dvmResolveInlineNative
cmp r0, #0 @ did it resolve?
beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
mov r1, rSELF
bl dvmFastMethodTraceEnter @ (method, self)
add r1, rSELF, #offThread_retval@ r1<- &self->retval
sub sp, sp, #8 @ make room for arg, +64 bit align
- mov r0, rINST, lsr #12 @ r0<- B
+ mov r0, rINST, lsr #8 @ r0<- AA (arg count)
+ mov rINST, r9 @ rINST<- method
str r1, [sp] @ push &self->retval
bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
add sp, sp, #8 @ pop stack
- cmp r0, #0 @ test boolean result of inline
- beq common_exceptionThrown @ returned false, handle exception
- mov r0, r10
- bl dvmResolveInlineNative @ reload method
+ mov r0, rINST @ r0<- method
mov r1, rSELF
- bl dvmFastMethodTraceExit @ (method, self)
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
GET_INST_OPCODE(ip) @ extract opcode from rINST
GOTO_OPCODE(ip) @ jump to next instruction
@@ -26795,6 +26798,8 @@
2:
@ Profiling - record method entry. r0: methodToCall
stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r1, r0
mov r0, rSELF
bl dvmReportInvoke @ (self, method)
@@ -26846,9 +26851,11 @@
11:
@ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
stmfd sp!, {r0-r3} @ save all but subModes
- mov r0, rPC
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPreNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPreNativeInvoke @ (methodToCall, self)
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
@@ -26859,9 +26866,10 @@
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
@ Finish up any post-invoke subMode requirements
- mov r0, rPC
+ @ interpSave already up-to-date
+ mov r0, r2 @ r0<- methodToCall
mov r1, rSELF
- bl dvmReportPostNativeInvoke @ (pc, self, methodToCall)
+ bl dvmReportPostNativeInvoke @ (methodToCall, self)
b 7b @ resume
.LstackOverflow: @ r0=methodToCall
@@ -26954,10 +26962,11 @@
19:
@ Handle special actions
@ On entry, r0: StackSaveArea
- ldr r2, [r0, #offStackSaveArea_prevFrame] @ r2<- prevFP
- mov r1, rPC
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF
- bl dvmReportReturn @ (self, pc, prevFP)
+ bl dvmReportReturn @ (self)
SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
b 14b @ continue
@@ -27063,11 +27072,11 @@
@ Manage debugger bookkeeping
7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_fp] @ update interpSave.fp
mov r0, rSELF @ arg0<- self
- ldr r1, [rSELF, #offThread_method] @ arg1<- curMethod
- mov r2, rPC @ arg2<- pc
- mov r3, rFP @ arg3<- fp
- bl dvmReportExceptionThrow @ (self, method, pc, fp)
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
b 8b @ resume with normal handling
.LnotCaughtLocally: @ r9=exception
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index cafb897..ed9b4e9 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -26330,11 +26330,18 @@
GOTO_NEXT # jump to methodToCall->insns
2:
- /* Live: %eax, %ecx, %edx - preserve */
+ /*
+ * On entry, preserve all:
+ * %eax: method
+ * %ecx: self
+ * %edx: new save area
+ */
SPILL_TMP1(%eax) # preserve methodToCall
SPILL_TMP2(%edx) # preserve newSaveArea
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
call dvmReportInvoke # (self, method)
UNSPILL_TMP1(%eax)
UNSPILL_TMP2(%edx)
@@ -26379,10 +26386,11 @@
* %eax=methodToCall, rINST=newFP, %ecx=self
*/
SPILL_TMP1(%eax) # save methodTocall
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPreNativeInvoke # (pc, self, methodToCall)
+ movl rPC, offThread_pc(%ecx)
+ movl rFP, offThread_fp(%ecx)
+ movl %eax, OUT_ARG0(%esp)
+ movl %ecx, OUT_ARG1(%esp)
+ call dvmReportPreNativeInvoke # (methodToCall, self)
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF,%ecx # restore self
@@ -26396,10 +26404,9 @@
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF, %ecx
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPostNativeInvoke # (pc, self, methodToCall)
+ movl %eax, OUT_ARG0(%esp)
+ movl %ecx, OUT_ARG1(%esp)
+ call dvmReportPostNativeInvoke # (methodToCall, self)
jmp 7b # rejoin
.LstackOverflow: # eax=methodToCall
@@ -26440,10 +26447,10 @@
* Handle special subMode actions
* On entry, rFP: prevFP, %ecx: self, %eax: saveArea
*/
- movl rFP, OUT_ARG2(%esp) # parameter prevFP
- movl rPC, OUT_ARG1(%esp) # parameter pc
- movl %ecx, OUT_ARG1(%esp) # parameter self
- call dvmReportReturn # (self, pc, prevFP)
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl %ecx, OUT_ARG0(%esp) # parameter self
+ call dvmReportReturn # (self)
movl rSELF, %ecx # restore self
SAVEAREA_FROM_FP %eax # restore saveArea
jmp 14b
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
index c6784c5..8930c44 100644
--- a/vm/mterp/out/InterpC-allstubs.c
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
@@ -2884,7 +2891,7 @@
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
@@ -5034,7 +5041,8 @@
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
@@ -5046,6 +5054,7 @@
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
@@ -5110,7 +5119,8 @@
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
@@ -5184,6 +5194,7 @@
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
@@ -5360,6 +5371,7 @@
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
@@ -5369,6 +5381,7 @@
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
@@ -5389,7 +5402,8 @@
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -5403,7 +5417,8 @@
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
diff --git a/vm/mterp/out/InterpC-armv5te-vfp.c b/vm/mterp/out/InterpC-armv5te-vfp.c
index f05c3ee..bcf03c1 100644
--- a/vm/mterp/out/InterpC-armv5te-vfp.c
+++ b/vm/mterp/out/InterpC-armv5te-vfp.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
diff --git a/vm/mterp/out/InterpC-armv5te.c b/vm/mterp/out/InterpC-armv5te.c
index 3bf9fd4..9d6e486 100644
--- a/vm/mterp/out/InterpC-armv5te.c
+++ b/vm/mterp/out/InterpC-armv5te.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
diff --git a/vm/mterp/out/InterpC-armv7-a-neon.c b/vm/mterp/out/InterpC-armv7-a-neon.c
index d233dbc..c83d470 100644
--- a/vm/mterp/out/InterpC-armv7-a-neon.c
+++ b/vm/mterp/out/InterpC-armv7-a-neon.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
diff --git a/vm/mterp/out/InterpC-armv7-a.c b/vm/mterp/out/InterpC-armv7-a.c
index 94488e1..98b8a7d 100644
--- a/vm/mterp/out/InterpC-armv7-a.c
+++ b/vm/mterp/out/InterpC-armv7-a.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
diff --git a/vm/mterp/out/InterpC-portable.c b/vm/mterp/out/InterpC-portable.c
index 8d29d72..967cae5 100644
--- a/vm/mterp/out/InterpC-portable.c
+++ b/vm/mterp/out/InterpC-portable.c
@@ -396,6 +396,16 @@
#define JIT_STUB_HACK(x)
/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF() \
+ self->interpSave.pc = pc; \
+ self->interpSave.fp = fp;
+
+/*
* Instruction framing. For a switch-oriented implementation this is
* case/break, for a threaded implementation it's a goto label and an
* instruction fetch/computed goto.
@@ -2897,7 +2907,7 @@
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
@@ -4984,7 +4994,8 @@
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
@@ -4996,6 +5007,7 @@
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
@@ -5060,7 +5072,8 @@
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
@@ -5134,6 +5147,7 @@
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
@@ -5310,6 +5324,7 @@
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
@@ -5319,6 +5334,7 @@
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
@@ -5339,7 +5355,8 @@
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -5353,7 +5370,8 @@
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c
index 1a3542c..85414bb 100644
--- a/vm/mterp/out/InterpC-x86-atom.c
+++ b/vm/mterp/out/InterpC-x86-atom.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
@@ -1310,7 +1317,7 @@
* for the sake of anything that needs to do disambiguation in a
* common handler with INST_INST.
*
- * The breakpoint itself is handled over in dvmUpdateDebugger(),
+ * The breakpoint itself is handled over in updateDebugger(),
* because we need to detect other events (method entry, single
* step) and report them in the same event packet, and we're not
* yet handling those through breakpoint instructions. By the
@@ -2123,7 +2130,8 @@
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
@@ -2135,6 +2143,7 @@
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
@@ -2199,7 +2208,8 @@
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
@@ -2273,6 +2283,7 @@
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
@@ -2449,6 +2460,7 @@
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
@@ -2458,6 +2470,7 @@
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
@@ -2478,7 +2491,8 @@
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -2492,7 +2506,8 @@
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
index eb54a0f..dd25604 100644
--- a/vm/mterp/out/InterpC-x86.c
+++ b/vm/mterp/out/InterpC-x86.c
@@ -419,6 +419,13 @@
#define JIT_STUB_HACK(x)
#endif
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter keeps these
+ * in local variables, we must flush them to interpSave first.
+ * Stubs use the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
/*
* Opcode handler framing macros. Here, each opcode is a separate function
@@ -2065,7 +2072,8 @@
/* Handle any special subMode requirements */
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportReturn(self, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportReturn(self);
}
if (dvmIsBreakFrame(fp)) {
@@ -2077,6 +2085,7 @@
/* update thread FP, and reset local variables */
self->curFrame = fp;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = saveArea->savedPc;
@@ -2141,7 +2150,8 @@
* debugger.
*/
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportExceptionThrow(self, curMethod, pc, fp);
+ PC_FP_TO_SELF();
+ dvmReportExceptionThrow(self, exception);
}
/*
@@ -2215,6 +2225,7 @@
*/
//fp = (u4*) self->curFrame;
curMethod = SAVEAREA_FROM_FP(fp)->method;
+ self->interpSave.method = curMethod;
//methodClass = curMethod->clazz;
methodClassDex = curMethod->clazz->pDvmDex;
pc = curMethod->insns + catchRelPc;
@@ -2391,6 +2402,7 @@
* calls. For native calls, we'll mark EXIT on return.
* For non-native calls, EXIT is marked in the RETURN op.
*/
+ PC_FP_TO_SELF();
dvmReportInvoke(self, methodToCall);
}
@@ -2400,6 +2412,7 @@
* frame pointer and other local state, and continue.
*/
curMethod = methodToCall;
+ self->interpSave.method = curMethod;
methodClassDex = curMethod->clazz->pDvmDex;
pc = methodToCall->insns;
self->curFrame = fp = newFp;
@@ -2420,7 +2433,8 @@
DUMP_REGS(methodToCall, newFp, true); // show input args
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPreNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPreNativeInvoke(methodToCall, self);
}
ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
@@ -2434,7 +2448,8 @@
(*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
if (self->interpBreak.ctl.subMode != 0) {
- dvmReportPostNativeInvoke(pc, self, methodToCall);
+ PC_FP_TO_SELF();
+ dvmReportPostNativeInvoke(methodToCall, self);
}
/* pop frame off */
diff --git a/vm/mterp/portable/stubdefs.c b/vm/mterp/portable/stubdefs.c
index e0523c8..de9a8e7 100644
--- a/vm/mterp/portable/stubdefs.c
+++ b/vm/mterp/portable/stubdefs.c
@@ -14,6 +14,16 @@
#define JIT_STUB_HACK(x)
/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter keeps these
+ * in local variables, we must flush them to interpSave first.
+ * Stubs use the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF() \
+ self->interpSave.pc = pc; \
+ self->interpSave.fp = fp;
+
+/*
* Instruction framing. For a switch-oriented implementation this is
* case/break, for a threaded implementation it's a goto label and an
* instruction fetch/computed goto.
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S
index 8aed133..b79d8cf 100644
--- a/vm/mterp/x86/footer.S
+++ b/vm/mterp/x86/footer.S
@@ -399,11 +399,18 @@
GOTO_NEXT # jump to methodToCall->insns
2:
- /* Live: %eax, %ecx, %edx - preserve */
+ /*
+ * On entry (all must be preserved across the call):
+ * %eax: method
+ * %ecx: self
+ * %edx: new save area
+ */
SPILL_TMP1(%eax) # preserve methodToCall
SPILL_TMP2(%edx) # preserve newSaveArea
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
call dvmReportInvoke # (self, method)
UNSPILL_TMP1(%eax)
UNSPILL_TMP2(%edx)
@@ -448,10 +455,11 @@
* %eax=methodToCall, rINST=newFP, %ecx=self
*/
SPILL_TMP1(%eax) # save methodTocall
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPreNativeInvoke # (pc, self, methodToCall)
+ movl rPC, offThread_pc(%ecx)
+ movl rFP, offThread_fp(%ecx)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPreNativeInvoke # (self, methodToCall)
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF,%ecx # restore self
@@ -465,10 +473,9 @@
UNSPILL_TMP1(%eax) # restore methodToCall
movl rSELF, %ecx
- movl %eax, OUT_ARG2(%esp)
- movl %ecx, OUT_ARG1(%esp)
- movl rPC, OUT_ARG0(%esp)
- call dvmReportPostNativeInvoke # (pc, self, methodToCall)
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ call dvmReportPostNativeInvoke # (self, methodToCall)
jmp 7b # rejoin
.LstackOverflow: # eax=methodToCall
@@ -509,10 +516,10 @@
* Handle special subMode actions
* On entry, rFP: prevFP, %ecx: self, %eax: saveArea
*/
- movl rFP, OUT_ARG2(%esp) # parameter prevFP
- movl rPC, OUT_ARG1(%esp) # parameter pc
- movl %ecx, OUT_ARG1(%esp) # parameter self
- call dvmReportReturn # (self, pc, prevFP)
+ movl rFP, offThread_fp(%ecx) # update interpSave.fp
+ movl rPC, offThread_pc(%ecx) # update interpSave.pc
+ movl %ecx, OUT_ARG0(%esp) # parameter self
+ call dvmReportReturn # (self)
movl rSELF, %ecx # restore self
SAVEAREA_FROM_FP %eax # restore saveArea
jmp 14b