Add JIT self-verification mode (WITH_SELF_VERIFICATION): run compiled traces natively, then replay them with the interpreter and verify that memory and register state match.
diff --git a/vm/Dvm.mk b/vm/Dvm.mk
index 2aee9d0..cc08ed7 100644
--- a/vm/Dvm.mk
+++ b/vm/Dvm.mk
@@ -212,6 +212,12 @@
LOCAL_CFLAGS += -DWITH_JIT_TUNING
endif
+# Enable JIT self verification tool. Runs compiled traces natively, then replays
+# them with the interpreter, and ensures memory and register state are the same.
+ifeq ($(WITH_SELF_VERIFICATION),true)
+ LOCAL_CFLAGS += -DWITH_SELF_VERIFICATION
+endif
+
WITH_HPROF := $(strip $(WITH_HPROF))
ifeq ($(WITH_HPROF),)
WITH_HPROF := true
diff --git a/vm/Init.c b/vm/Init.c
index 45c66e1..046db67 100644
--- a/vm/Init.c
+++ b/vm/Init.c
@@ -185,7 +185,10 @@
" resolver_cache_disabled"
#endif
#if defined(WITH_JIT)
- " with_jit"
+ " jit"
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+ " self_verification"
#endif
);
#ifdef DVM_SHOW_EXCEPTION
diff --git a/vm/Thread.c b/vm/Thread.c
index 3f63132..7c9df3e 100644
--- a/vm/Thread.c
+++ b/vm/Thread.c
@@ -19,6 +19,7 @@
*/
#include "Dalvik.h"
+#include "interp/Jit.h" // need for self verification
#include "utils/threads.h" // need Android thread priorities
#include <stdlib.h>
@@ -531,7 +532,7 @@
u8 startWhen = 0; // init req'd to placate gcc
int sleepIter = 0;
int cc;
-
+
do {
cc = pthread_mutex_trylock(&gDvm._threadSuspendLock);
if (cc != 0) {
@@ -892,6 +893,11 @@
if (thread == NULL)
return NULL;
+#if defined(WITH_SELF_VERIFICATION)
+ if (dvmSelfVerificationShadowSpaceAlloc(thread) == NULL)
+ return NULL;
+#endif
+
assert(interpStackSize >= kMinStackSize && interpStackSize <=kMaxStackSize);
thread->status = THREAD_INITIALIZING;
@@ -911,6 +917,9 @@
#ifdef MALLOC_INTERP_STACK
stackBottom = (u1*) malloc(interpStackSize);
if (stackBottom == NULL) {
+#if defined(WITH_SELF_VERIFICATION)
+ dvmSelfVerificationShadowSpaceFree(thread);
+#endif
free(thread);
return NULL;
}
@@ -919,6 +928,9 @@
stackBottom = mmap(NULL, interpStackSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if (stackBottom == MAP_FAILED) {
+#if defined(WITH_SELF_VERIFICATION)
+ dvmSelfVerificationShadowSpaceFree(thread);
+#endif
free(thread);
return NULL;
}
@@ -1047,6 +1059,9 @@
if (&thread->jniMonitorRefTable.table != NULL)
dvmClearReferenceTable(&thread->jniMonitorRefTable);
+#if defined(WITH_SELF_VERIFICATION)
+ dvmSelfVerificationShadowSpaceFree(thread);
+#endif
free(thread);
}
@@ -2405,7 +2420,7 @@
char proc[100];
sprintf(proc, "/proc/%d/exe", getpid());
int len;
-
+
len = readlink(proc, exePath, sizeof(exePath)-1);
exePath[len] = '\0';
}
@@ -2646,7 +2661,7 @@
/* wait for the other thread to see the pending suspend */
waitForThreadSuspend(self, thread);
- LOG_THREAD("threadid=%d: threadid=%d status=%d c=%d dc=%d isSusp=%d\n",
+ LOG_THREAD("threadid=%d: threadid=%d status=%d c=%d dc=%d isSusp=%d\n",
self->threadId,
thread->threadId, thread->status, thread->suspendCount,
thread->dbgSuspendCount, thread->isSuspended);
@@ -3814,7 +3829,7 @@
* through the actual ThreadGroups, but it should be
* equivalent.
*
- * This assumes that the ThreadGroup class object is in
+ * This assumes that the ThreadGroup class object is in
* the root set, which should always be true; it's
* loaded by the built-in class loader, which is part
* of the root set.
diff --git a/vm/Thread.h b/vm/Thread.h
index 45018b4..bff7316 100644
--- a/vm/Thread.h
+++ b/vm/Thread.h
@@ -209,6 +209,11 @@
/* PC, saved on every instruction; redundant with StackSaveArea */
const u2* currentPc2;
#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+ /* Buffer for register state during self verification */
+ struct ShadowSpace* shadowSpace;
+#endif
} Thread;
/* start point for an internal thread; mimics pthread args */
@@ -398,7 +403,7 @@
* thread is part of the GC's root set.
*/
bool dvmIsOnThreadList(const Thread* thread);
-
+
/*
* Get/set the JNIEnv field.
*/
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index 3b7ae54..2cf762f 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -62,8 +62,23 @@
kJitTSelectEnd = 5, // Done with the trace - wrap it up
kJitSingleStep = 6, // Single step interpretation
kJitSingleStepEnd = 7, // Done with single step, return to mterp
+ kJitSelfVerification = 8, // Self Verification Mode
} JitState;
+#if defined(WITH_SELF_VERIFICATION)
+typedef enum SelfVerificationState {
+ kSVSIdle = 0, // Idle
+ kSVSStart = 1, // Shadow space set up, running compiled code
+ kSVSPunt = 2, // Exiting compiled code by punting
+ kSVSSingleStep = 3, // Exiting compiled code by single stepping
+ kSVSTraceSelect = 4, // Exiting compiled code by trace select
+ kSVSNormal = 5, // Exiting compiled code normally
+ kSVSNoChain = 6, // Exiting compiled code by no chain
+ kSVSBackwardBranch = 7, // Exiting compiled code with backward branch trace
+ kSVSDebugInterp = 8, // Normal state restored, running debug interpreter
+} SelfVerificationState;
+#endif
+
typedef enum JitHint {
kJitHintNone = 0,
kJitHintTaken = 1, // Last inst in run was taken branch
diff --git a/vm/compiler/CompilerIR.h b/vm/compiler/CompilerIR.h
index d61d2ee..389ac7c 100644
--- a/vm/compiler/CompilerIR.h
+++ b/vm/compiler/CompilerIR.h
@@ -25,6 +25,7 @@
CHAINING_CELL_HOT,
CHAINING_CELL_INVOKE_SINGLETON,
CHAINING_CELL_INVOKE_PREDICTED,
+ CHAINING_CELL_BACKWARD_BRANCH,
CHAINING_CELL_LAST,
DALVIK_BYTECODE,
PC_RECONSTRUCTION,
diff --git a/vm/compiler/Frontend.c b/vm/compiler/Frontend.c
index 66060a0..6ed32ca 100644
--- a/vm/compiler/Frontend.c
+++ b/vm/compiler/Frontend.c
@@ -454,10 +454,23 @@
}
/* For unconditional branches, request a hot chaining cell */
} else {
+#if !defined(WITH_SELF_VERIFICATION)
newBB = dvmCompilerNewBB(flags & kInstrUnconditional ?
CHAINING_CELL_HOT :
CHAINING_CELL_NORMAL);
newBB->startOffset = targetOffset;
+#else
+ /* Handle branches that branch back into the block */
+ if (targetOffset >= curBB->firstMIRInsn->offset &&
+ targetOffset <= curBB->lastMIRInsn->offset) {
+ newBB = dvmCompilerNewBB(CHAINING_CELL_BACKWARD_BRANCH);
+ } else {
+ newBB = dvmCompilerNewBB(flags & kInstrUnconditional ?
+ CHAINING_CELL_HOT :
+ CHAINING_CELL_NORMAL);
+ }
+ newBB->startOffset = targetOffset;
+#endif
}
newBB->id = numBlocks++;
curBB->taken = newBB;
diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c
index 60c5cdb..7f6d284 100644
--- a/vm/compiler/codegen/arm/ArchUtility.c
+++ b/vm/compiler/codegen/arm/ArchUtility.c
@@ -209,6 +209,9 @@
((Method *)dest)->name,
((Method *)dest)->insns);
break;
+ case ARM_PSEUDO_CHAINING_CELL_BACKWARD_BRANCH:
+ LOGD("-------- chaining cell (backward branch): 0x%04x\n", dest);
+ break;
case ARM_PSEUDO_DALVIK_BYTECODE_BOUNDARY:
LOGD("-------- dalvik offset: 0x%04x @ %s\n", dest,
getOpcodeName(lir->operands[1]));
diff --git a/vm/compiler/codegen/arm/ArmLIR.h b/vm/compiler/codegen/arm/ArmLIR.h
index 59c7529..1ecbbf1 100644
--- a/vm/compiler/codegen/arm/ArmLIR.h
+++ b/vm/compiler/codegen/arm/ArmLIR.h
@@ -154,7 +154,8 @@
* Assemble.c.
*/
typedef enum ArmOpCode {
- ARM_PSEUDO_TARGET_LABEL = -11,
+ ARM_PSEUDO_TARGET_LABEL = -12,
+ ARM_PSEUDO_CHAINING_CELL_BACKWARD_BRANCH = -11,
ARM_PSEUDO_CHAINING_CELL_HOT = -10,
ARM_PSEUDO_CHAINING_CELL_INVOKE_PREDICTED = -9,
ARM_PSEUDO_CHAINING_CELL_INVOKE_SINGLETON = -8,
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index ea133e7..fc740b6 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -902,6 +902,12 @@
PredictedChainingCell *cell,
const ClassObject *clazz)
{
+#if defined(WITH_SELF_VERIFICATION)
+ /* Disable chaining and prevent this from triggering again for a while */
+ cell->counter = PREDICTED_CHAIN_COUNTER_AVOID;
+ cacheflush((long) cell, (long) (cell+1), 0);
+ goto done;
+#else
/* Don't come back here for a long time if the method is native */
if (dvmIsNativeMethod(method)) {
cell->counter = PREDICTED_CHAIN_COUNTER_AVOID;
@@ -951,6 +957,7 @@
/* All done - resume all other threads */
dvmResumeAllThreads(SUSPEND_FOR_JIT);
+#endif
done:
return method;
@@ -1018,6 +1025,12 @@
predChainCell->method = PREDICTED_CHAIN_METHOD_INIT;
predChainCell->counter = PREDICTED_CHAIN_COUNTER_INIT;
break;
+#if defined(WITH_SELF_VERIFICATION)
+ case CHAINING_CELL_BACKWARD_BRANCH:
+ targetOffset = offsetof(InterpState,
+ jitToInterpEntries.dvmJitToBackwardBranch);
+ break;
+#endif
default:
dvmAbort();
}
diff --git a/vm/compiler/codegen/arm/Codegen.c b/vm/compiler/codegen/arm/Codegen.c
index b6c266d..83b9b24 100644
--- a/vm/compiler/codegen/arm/Codegen.c
+++ b/vm/compiler/codegen/arm/Codegen.c
@@ -31,6 +31,456 @@
/* Track exercised opcodes */
static int opcodeCoverage[256];
+#if defined(WITH_SELF_VERIFICATION)
+/* Prevent certain opcodes from being jitted */
+static inline bool selfVerificationPuntOps(OpCode op)
+{
+ return (op == OP_MONITOR_ENTER || op == OP_MONITOR_EXIT ||
+ op == OP_NEW_INSTANCE || op == OP_NEW_ARRAY);
+}
+
+/*
+ * The following are used to keep compiled loads and stores from modifying
+ * memory during self verification mode.
+ *
+ * Stores do not modify memory. Instead, the address and value pair are stored
+ * into heapSpace. Addresses within heapSpace are unique. For accesses smaller
+ * than a word, the word containing the address is loaded first before being
+ * updated.
+ *
+ * Loads check heapSpace first and return data from there if an entry exists.
+ * Otherwise, data is loaded from memory as usual.
+ */
+
+/* Decode contents of heapArgSpace to determine addr to load from */
+static void selfVerificationLoadDecode(HeapArgSpace* heapArgSpace, int* addr)
+{
+ int reg = heapArgSpace->regMap & 0xF;
+
+ switch (reg) {
+ case 0:
+ *addr = heapArgSpace->r0;
+ break;
+ case 1:
+ *addr = heapArgSpace->r1;
+ break;
+ case 2:
+ *addr = heapArgSpace->r2;
+ break;
+ case 3:
+ *addr = heapArgSpace->r3;
+ break;
+ default:
+ LOGE("ERROR: bad reg used in selfVerificationLoadDecode: %d", reg);
+ break;
+ }
+}
+
+/* Write the loaded data into the heapArgSpace slot selected by reg */
+static void selfVerificationLoadDecodeData(HeapArgSpace* heapArgSpace,
+ int data, int reg)
+{
+ switch (reg) {
+ case 0:
+ heapArgSpace->r0 = data;
+ break;
+ case 1:
+ heapArgSpace->r1 = data;
+ break;
+ case 2:
+ heapArgSpace->r2 = data;
+ break;
+ case 3:
+ heapArgSpace->r3 = data;
+ break;
+ default:
+ LOGE("ERROR: bad reg passed to selfVerificationLoadDecodeData: %d",
+ reg);
+ break;
+ }
+}
+
+static void selfVerificationLoad(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == addr) {
+ data = heapSpacePtr->data;
+ break;
+ }
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail)
+ data = *((unsigned int*) addr);
+
+ //LOGD("*** HEAP LOAD: Addr: 0x%x Data: 0x%x", addr, data);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+}
+
+static void selfVerificationLoadByte(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x3;
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) {
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ data = *((unsigned char*) addr);
+ break;
+ }
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail)
+ data = *((unsigned char*) addr);
+
+ //LOGD("*** HEAP LOAD BYTE: Addr: 0x%x Data: 0x%x", addr, data);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+}
+
+static void selfVerificationLoadHalfword(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x2;
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) {
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ data = *((unsigned short*) addr);
+ break;
+ }
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail)
+ data = *((unsigned short*) addr);
+
+ //LOGD("*** HEAP LOAD HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+}
+
+static void selfVerificationLoadSignedByte(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap* heapSpacePtr;
+ ShadowSpace* shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x3;
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) {
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ data = *((signed char*) addr);
+ break;
+ }
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail)
+ data = *((signed char*) addr);
+
+ //LOGD("*** HEAP LOAD SIGNED BYTE: Addr: 0x%x Data: 0x%x", addr, data);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+}
+
+static void selfVerificationLoadSignedHalfword(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap* heapSpacePtr;
+ ShadowSpace* shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x2;
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) {
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ data = *((signed short*) addr);
+ break;
+ }
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail)
+ data = *((signed short*) addr);
+
+ //LOGD("*** HEAP LOAD SIGNED HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+}
+
+static void selfVerificationLoadDoubleword(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap* heapSpacePtr;
+ ShadowSpace* shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr;
+ selfVerificationLoadDecode(heapArgSpace, &addr);
+
+ int addr2 = addr+4;
+ unsigned int data = *((unsigned int*) addr);
+ unsigned int data2 = *((unsigned int*) addr2);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == addr) {
+ data = heapSpacePtr->data;
+ } else if (heapSpacePtr->addr == addr2) {
+ data2 = heapSpacePtr->data;
+ }
+ }
+
+ //LOGD("*** HEAP LOAD DOUBLEWORD: Addr: 0x%x Data: 0x%x Data2: 0x%x",
+ // addr, data, data2);
+
+ int reg = (heapArgSpace->regMap >> 4) & 0xF;
+ int reg2 = (heapArgSpace->regMap >> 8) & 0xF;
+ selfVerificationLoadDecodeData(heapArgSpace, data, reg);
+ selfVerificationLoadDecodeData(heapArgSpace, data2, reg2);
+}
+
+/* Read the value of the register selected by reg out of heapArgSpace */
+static void selfVerificationStoreDecode(HeapArgSpace* heapArgSpace,
+ int* value, int reg)
+{
+ switch (reg) {
+ case 0:
+ *value = heapArgSpace->r0;
+ break;
+ case 1:
+ *value = heapArgSpace->r1;
+ break;
+ case 2:
+ *value = heapArgSpace->r2;
+ break;
+ case 3:
+ *value = heapArgSpace->r3;
+ break;
+ default:
+ LOGE("ERROR: bad reg passed to selfVerificationStoreDecode: %d",
+ reg);
+ break;
+ }
+}
+
+static void selfVerificationStore(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ int reg0 = heapArgSpace->regMap & 0xF;
+ int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
+ selfVerificationStoreDecode(heapArgSpace, &data, reg1);
+
+ //LOGD("*** HEAP STORE: Addr: 0x%x Data: 0x%x", addr, data);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == addr) break;
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail) {
+ heapSpacePtr->addr = addr;
+ shadowSpace->heapSpaceTail++;
+ }
+
+ heapSpacePtr->data = data;
+}
+
+static void selfVerificationStoreByte(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ int reg0 = heapArgSpace->regMap & 0xF;
+ int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
+ selfVerificationStoreDecode(heapArgSpace, &data, reg1);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x3;
+
+ //LOGD("*** HEAP STORE BYTE: Addr: 0x%x Data: 0x%x", addr, data);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) break;
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail) {
+ heapSpacePtr->addr = maskedAddr;
+ heapSpacePtr->data = *((unsigned int*) maskedAddr);
+ shadowSpace->heapSpaceTail++;
+ }
+
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ *((unsigned char*) addr) = (char) data;
+
+ //LOGD("*** HEAP STORE BYTE: Addr: 0x%x Final Data: 0x%x",
+ // addr, heapSpacePtr->data);
+}
+
+static void selfVerificationStoreHalfword(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data;
+ int reg0 = heapArgSpace->regMap & 0xF;
+ int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
+ selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
+ selfVerificationStoreDecode(heapArgSpace, &data, reg1);
+
+ int maskedAddr = addr & 0xFFFFFFFC;
+ int alignment = addr & 0x2;
+
+ //LOGD("*** HEAP STORE HALFWORD: Addr: 0x%x Data: 0x%x", addr, data);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == maskedAddr) break;
+ }
+
+ if (heapSpacePtr == shadowSpace->heapSpaceTail) {
+ heapSpacePtr->addr = maskedAddr;
+ heapSpacePtr->data = *((unsigned int*) maskedAddr);
+ shadowSpace->heapSpaceTail++;
+ }
+
+ addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
+ *((unsigned short*) addr) = (short) data;
+
+ //LOGD("*** HEAP STORE HALFWORD: Addr: 0x%x Final Data: 0x%x",
+ // addr, heapSpacePtr->data);
+}
+
+static void selfVerificationStoreDoubleword(InterpState* interpState)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowHeap *heapSpacePtr;
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ HeapArgSpace *heapArgSpace = &(interpState->heapArgSpace);
+
+ int addr, data, data2;
+ int reg0 = heapArgSpace->regMap & 0xF;
+ int reg1 = (heapArgSpace->regMap >> 4) & 0xF;
+ int reg2 = (heapArgSpace->regMap >> 8) & 0xF;
+ selfVerificationStoreDecode(heapArgSpace, &addr, reg0);
+ selfVerificationStoreDecode(heapArgSpace, &data, reg1);
+ selfVerificationStoreDecode(heapArgSpace, &data2, reg2);
+
+ int addr2 = addr+4;
+ bool store1 = false, store2 = false;
+
+ //LOGD("*** HEAP STORE DOUBLEWORD: Addr: 0x%x Data: 0x%x, Data2: 0x%x",
+ // addr, data, data2);
+
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ if (heapSpacePtr->addr == addr) {
+ heapSpacePtr->data = data;
+ store1 = true;
+ } else if (heapSpacePtr->addr == addr2) {
+ heapSpacePtr->data = data2;
+ store2 = true;
+ }
+ }
+
+ if (!store1) {
+ shadowSpace->heapSpaceTail->addr = addr;
+ shadowSpace->heapSpaceTail->data = data;
+ shadowSpace->heapSpaceTail++;
+ }
+ if (!store2) {
+ shadowSpace->heapSpaceTail->addr = addr2;
+ shadowSpace->heapSpaceTail->data = data2;
+ shadowSpace->heapSpaceTail++;
+ }
+}
+
+/* Common wrapper function for all memory operations */
+static void selfVerificationMemOpWrapper(CompilationUnit *cUnit, int regMap,
+ void* funct)
+{
+ int regMask = (1 << r4PC) | (1 << r3) | (1 << r2) | (1 << r1) | (1 << r0);
+
+ /* r7 <- InterpState->heapArgSpace */
+ loadConstant(cUnit, r4PC, offsetof(InterpState, heapArgSpace));
+ newLIR3(cUnit, THUMB_ADD_RRR, r7, rGLUE, r4PC);
+
+ /* Save out values to heapArgSpace */
+ loadConstant(cUnit, r4PC, regMap);
+ newLIR2(cUnit, THUMB_STMIA, r7, regMask);
+
+ /* Pass interpState pointer to function */
+ newLIR2(cUnit, THUMB_MOV_RR, r0, rGLUE);
+
+ /* Set function pointer and branch */
+ loadConstant(cUnit, r1, (int) funct);
+ newLIR1(cUnit, THUMB_BLX_R, r1);
+
+ /* r7 <- InterpState->heapArgSpace */
+ loadConstant(cUnit, r4PC, offsetof(InterpState, heapArgSpace));
+ newLIR3(cUnit, THUMB_ADD_RRR, r7, rGLUE, r4PC);
+
+ /* Restore register state */
+ newLIR2(cUnit, THUMB_LDMIA, r7, regMask);
+}
+#endif
+
/*****************************************************************************/
/*
@@ -284,8 +734,16 @@
loadConstant(cUnit, reg3, fieldOffset);
genNullCheck(cUnit, dInsn->vB, reg2, mir->offset, NULL); /* null object? */
newLIR3(cUnit, THUMB_ADD_RRR, reg2, reg2, reg3);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR2(cUnit, THUMB_LDMIA, reg2, (1<<reg0 | 1<<reg1));
storeValuePair(cUnit, reg0, reg1, dInsn->vA, reg3);
+#else
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap,
+ &selfVerificationLoadDoubleword);
+
+ storeValuePair(cUnit, reg0, reg1, dInsn->vA, reg3);
+#endif
}
/* Store a wide field to an object instance */
@@ -313,7 +771,13 @@
loadConstant(cUnit, reg3, fieldOffset);
genNullCheck(cUnit, dInsn->vB, reg2, mir->offset, NULL); /* null object? */
newLIR3(cUnit, THUMB_ADD_RRR, reg2, reg2, reg3);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR2(cUnit, THUMB_STMIA, reg2, (1<<reg0 | 1<<reg1));
+#else
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap,
+ &selfVerificationStoreDoubleword);
+#endif
}
/*
@@ -338,8 +802,18 @@
loadValue(cUnit, dInsn->vB, reg0);
loadConstant(cUnit, reg1, fieldOffset);
genNullCheck(cUnit, dInsn->vB, reg0, mir->offset, NULL); /* null object? */
+#if !defined(WITH_SELF_VERIFICATION)
newLIR3(cUnit, inst, reg0, reg0, reg1);
storeValue(cUnit, reg0, dInsn->vA, reg1);
+#else
+ /* Combine address and offset */
+ newLIR3(cUnit, THUMB_ADD_RRR, reg0, reg0, reg1);
+
+ int regMap = reg0 << 4 | reg0;
+ selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationLoad);
+
+ storeValue(cUnit, reg0, dInsn->vA, reg1);
+#endif
}
/*
@@ -366,7 +840,17 @@
loadValue(cUnit, dInsn->vA, reg2);
updateLiveRegister(cUnit, dInsn->vA, reg2);
genNullCheck(cUnit, dInsn->vB, reg0, mir->offset, NULL); /* null object? */
+#if !defined(WITH_SELF_VERIFICATION)
newLIR3(cUnit, inst, reg2, reg0, reg1);
+#else
+ /* Combine address and offset */
+ newLIR3(cUnit, THUMB_ADD_RRR, reg0, reg0, reg1);
+
+ int regMap = reg2 << 4 | reg0;
+ selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationStore);
+
+ newLIR3(cUnit, THUMB_SUB_RRR, reg0, reg0, reg1);
+#endif
}
@@ -406,6 +890,7 @@
if (scale) {
newLIR3(cUnit, THUMB_LSL, reg3, reg3, scale);
}
+#if !defined(WITH_SELF_VERIFICATION)
if (scale==3) {
newLIR3(cUnit, inst, reg0, reg2, reg3);
newLIR2(cUnit, THUMB_ADD_RI8, reg2, 4);
@@ -415,6 +900,46 @@
newLIR3(cUnit, inst, reg0, reg2, reg3);
storeValue(cUnit, reg0, vDest, reg3);
}
+#else
+ void* funct;
+ switch (scale) {
+ case 0:
+ if (inst == THUMB_LDRSB_RRR)
+ funct = (void*) &selfVerificationLoadSignedByte;
+ else
+ funct = (void*) &selfVerificationLoadByte;
+ break;
+ case 1:
+ if (inst == THUMB_LDRSH_RRR)
+ funct = (void*) &selfVerificationLoadSignedHalfword;
+ else
+ funct = (void*) &selfVerificationLoadHalfword;
+ break;
+ case 2:
+ funct = (void*) &selfVerificationLoad;
+ break;
+ case 3:
+ funct = (void*) &selfVerificationLoadDoubleword;
+ break;
+ default:
+ LOGE("ERROR: bad scale value in genArrayGet: %d", scale);
+ funct = (void*) &selfVerificationLoad;
+ break;
+ }
+
+ /* Combine address and offset */
+ newLIR3(cUnit, THUMB_ADD_RRR, reg2, reg2, reg3);
+
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap, funct);
+
+ newLIR3(cUnit, THUMB_SUB_RRR, reg2, reg2, reg3);
+
+ if (scale==3)
+ storeValuePair(cUnit, reg0, reg1, vDest, reg3);
+ else
+ storeValue(cUnit, reg0, vDest, reg3);
+#endif
}
/* TODO: This should probably be done as an out-of-line instruction handler. */
@@ -463,6 +988,7 @@
* at this point, reg2 points to array, reg3 is scaled index, and
* reg0[reg1] is data
*/
+#if !defined(WITH_SELF_VERIFICATION)
if (scale==3) {
newLIR3(cUnit, inst, reg0, reg2, reg3);
newLIR2(cUnit, THUMB_ADD_RI8, reg2, 4);
@@ -470,6 +996,35 @@
} else {
newLIR3(cUnit, inst, reg0, reg2, reg3);
}
+#else
+ void *funct;
+ switch (scale) {
+ case 0:
+ funct = (void*) &selfVerificationStoreByte;
+ break;
+ case 1:
+ funct = (void*) &selfVerificationStoreHalfword;
+ break;
+ case 2:
+ funct = (void*) &selfVerificationStore;
+ break;
+ case 3:
+ funct = (void*) &selfVerificationStoreDoubleword;
+ break;
+ default:
+ LOGE("ERROR: bad scale value in genArrayPut: %d", scale);
+ funct = (void*) &selfVerificationStore;
+ break;
+ }
+
+ /* Combine address and offset */
+ newLIR3(cUnit, THUMB_ADD_RRR, reg2, reg2, reg3);
+
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap, funct);
+
+ newLIR3(cUnit, THUMB_SUB_RRR, reg2, reg2, reg3);
+#endif
}
static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir, int vDest,
@@ -1545,8 +2100,15 @@
(cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
assert(fieldPtr != NULL);
loadConstant(cUnit, regvNone, (int) fieldPtr + valOffset);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR3(cUnit, THUMB_LDR_RRI5, regvNone, regvNone, 0);
storeValue(cUnit, regvNone, mir->dalvikInsn.vA, NEXT_REG(regvNone));
+#else
+ int regMap = regvNone << 4 | regvNone;
+ selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationLoad);
+
+ storeValue(cUnit, regvNone, mir->dalvikInsn.vA, NEXT_REG(regvNone));
+#endif
break;
}
case OP_SGET_WIDE: {
@@ -1560,8 +2122,16 @@
reg1 = NEXT_REG(reg0);
reg2 = NEXT_REG(reg1);
loadConstant(cUnit, reg2, (int) fieldPtr + valOffset);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR2(cUnit, THUMB_LDMIA, reg2, (1<<reg0 | 1<<reg1));
storeValuePair(cUnit, reg0, reg1, mir->dalvikInsn.vA, reg2);
+#else
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap,
+ &selfVerificationLoadDoubleword);
+
+ storeValuePair(cUnit, reg0, reg1, mir->dalvikInsn.vA, reg2);
+#endif
break;
}
case OP_SPUT_OBJECT:
@@ -1578,7 +2148,12 @@
loadValue(cUnit, mir->dalvikInsn.vA, regvA);
updateLiveRegister(cUnit, mir->dalvikInsn.vA, regvA);
loadConstant(cUnit, NEXT_REG(regvA), (int) fieldPtr + valOffset);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR3(cUnit, THUMB_STR_RRI5, regvA, NEXT_REG(regvA), 0);
+#else
+ int regMap = regvA << 4 | NEXT_REG(regvA);
+ selfVerificationMemOpWrapper(cUnit, regMap, &selfVerificationStore);
+#endif
break;
}
case OP_SPUT_WIDE: {
@@ -1594,7 +2169,13 @@
loadValuePair(cUnit, mir->dalvikInsn.vA, reg0, reg1);
updateLiveRegisterPair(cUnit, mir->dalvikInsn.vA, reg0, reg1);
loadConstant(cUnit, reg2, (int) fieldPtr + valOffset);
+#if !defined(WITH_SELF_VERIFICATION)
newLIR2(cUnit, THUMB_STMIA, reg2, (1<<reg0 | 1<<reg1));
+#else
+ int regMap = reg1 << 8 | reg0 << 4 | reg2;
+ selfVerificationMemOpWrapper(cUnit, regMap,
+ &selfVerificationStoreDoubleword);
+#endif
break;
}
case OP_NEW_INSTANCE: {
@@ -2909,6 +3490,18 @@
addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
}
+#if defined(WITH_SELF_VERIFICATION)
+/* Chaining cell for branches that branch back into the same basic block */
+static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
+ unsigned int offset)
+{
+ newLIR3(cUnit, THUMB_LDR_RRI5, r0, rGLUE,
+ offsetof(InterpState, jitToInterpEntries.dvmJitToBackwardBranch) >> 2);
+ newLIR1(cUnit, THUMB_BLX_R, r0);
+ addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
+}
+
+#endif
/* Chaining cell for monomorphic method invocations. */
static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
const Method *callee)
@@ -3073,6 +3666,16 @@
newLIR1(cUnit, THUMB_BLX_R, r1);
}
break;
+#if defined(WITH_SELF_VERIFICATION)
+ case CHAINING_CELL_BACKWARD_BRANCH:
+ labelList[i].opCode =
+ ARM_PSEUDO_CHAINING_CELL_BACKWARD_BRANCH;
+ /* handle the codegen later */
+ dvmInsertGrowableList(
+ &chainingListByType[CHAINING_CELL_BACKWARD_BRANCH],
+ (void *) i);
+ break;
+#endif
default:
break;
}
@@ -3102,6 +3705,11 @@
((gDvmJit.opList[dalvikOpCode >> 3] &
(1 << (dalvikOpCode & 0x7))) !=
0);
+#if defined(WITH_SELF_VERIFICATION)
+ /* Punt on opcodes we can't replay */
+ if (selfVerificationPuntOps(dalvikOpCode))
+ singleStepMe = true;
+#endif
if (singleStepMe || cUnit->allSingleStep) {
notHandled = false;
genInterpSingleStep(cUnit, mir);
@@ -3252,6 +3860,12 @@
handleHotChainingCell(cUnit,
blockList[blockId]->startOffset);
break;
+#if defined(WITH_SELF_VERIFICATION)
+ case CHAINING_CELL_BACKWARD_BRANCH:
+ handleBackwardBranchChainingCell(cUnit,
+ blockList[blockId]->startOffset);
+ break;
+#endif
default:
dvmAbort();
break;
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 5e59991..fb656c0 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -22,7 +22,11 @@
str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
cmp r8, #0 @ suspendCount != 0
ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index 0ac7cf8..ec37bf9 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -31,7 +31,11 @@
cmp r8, #0 @ suspendCount != 0
bxne lr @ bail to the interpreter
tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
ldr r10, .LdvmJitToInterpNoChain
ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index f0a4623..1e4aad2 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -9,12 +9,20 @@
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
ldr r0, .LdvmJitToInterpNoChain @ defined in footer.S
mov rFP, r10 @ publish new FP
ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 2c669f8..815a0d4 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -180,12 +180,20 @@
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
ldr r0, .LdvmJitToInterpNoChain @ defined in footer.S
mov rFP, r10 @ publish new FP
ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
@@ -246,7 +254,11 @@
cmp r8, #0 @ suspendCount != 0
bxne lr @ bail to the interpreter
tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
ldr r10, .LdvmJitToInterpNoChain
ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
@@ -391,7 +403,11 @@
str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
cmp r8, #0 @ suspendCount != 0
ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index b973691..cb134e2 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -180,12 +180,20 @@
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
ldr r0, .LdvmJitToInterpNoChain @ defined in footer.S
mov rFP, r10 @ publish new FP
ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
@@ -246,7 +254,11 @@
cmp r8, #0 @ suspendCount != 0
bxne lr @ bail to the interpreter
tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
ldr r10, .LdvmJitToInterpNoChain
ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
@@ -391,7 +403,11 @@
str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
cmp r8, #0 @ suspendCount != 0
ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 1faf7f6..1382c2a 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -180,12 +180,20 @@
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
@ r2<- method we're returning to
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
ldr r0, .LdvmJitToInterpNoChain @ defined in footer.S
mov rFP, r10 @ publish new FP
ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
@@ -246,7 +254,11 @@
cmp r8, #0 @ suspendCount != 0
bxne lr @ bail to the interpreter
tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
ldr r10, .LdvmJitToInterpNoChain
ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
@@ -391,7 +403,11 @@
str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
cmp r8, #0 @ suspendCount != 0
ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
@ go ahead and transfer control to the native code
ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index f45e21a..a03acf9 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -868,6 +868,9 @@
extern void dvmJitToInterpSingleStep();
extern void dvmJitToTraceSelect();
extern void dvmJitToPatchPredictedChain();
+#if defined(WITH_SELF_VERIFICATION)
+ extern void dvmJitToBackwardBranch();
+#endif
/*
* Reserve a static entity here to quickly setup runtime contents as
@@ -880,6 +883,9 @@
dvmJitToInterpSingleStep,
dvmJitToTraceSelect,
dvmJitToPatchPredictedChain,
+#if defined(WITH_SELF_VERIFICATION)
+ dvmJitToBackwardBranch,
+#endif
};
#endif
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
index c9c80e3..928ca09 100644
--- a/vm/interp/InterpDefs.h
+++ b/vm/interp/InterpDefs.h
@@ -38,6 +38,16 @@
} InterpEntry;
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+typedef struct HeapArgSpace {
+ int r0;
+ int r1;
+ int r2;
+ int r3;
+ int regMap;
+} HeapArgSpace;
+#endif
+
/*
* There are six entry points from the compiled code to the interpreter:
* 1) dvmJitToInterpNormal: find if there is a corresponding compilation for
@@ -64,6 +74,9 @@
* just been reached).
* 6) dvmJitToPredictedChain: patch the chaining cell for a virtual call site
* to a predicted callee.
+ * 7) dvmJitToBackwardBranch: (WITH_SELF_VERIFICATION ONLY) special case of 1)
+ * and 5). This is used instead if the ending branch of the trace jumps back
+ * into the same basic block.
*/
struct JitToInterpEntries {
void *dvmJitToInterpNormal;
@@ -72,6 +85,9 @@
void *dvmJitToInterpSingleStep;
void *dvmJitToTraceSelect;
void *dvmJitToPatchPredictedChain;
+#if defined(WITH_SELF_VERIFICATION)
+ void *dvmJitToBackwardBranch;
+#endif
};
#define JIT_TRACE_THRESH_FILTER_SIZE 16
@@ -149,6 +165,9 @@
const u2* currRunHead; // Start of run we're building
int currRunLen; // Length of run in 16-bit words
int lastThreshFilter;
+#if defined(WITH_SELF_VERIFICATION)
+ struct HeapArgSpace heapArgSpace;
+#endif
const u2* threshFilter[JIT_TRACE_THRESH_FILTER_SIZE];
JitTraceRun trace[MAX_JIT_RUN_LEN];
#endif
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index 9079da1..3dca45b 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -33,6 +33,285 @@
#include "compiler/CompilerIR.h"
#include <errno.h>
+#if defined(WITH_SELF_VERIFICATION)
+/* Allocate space for per-thread ShadowSpace data structures */
+void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
+{
+ self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
+ if (self->shadowSpace == NULL)
+ return NULL;
+
+ self->shadowSpace->registerSpaceSize = REG_SPACE;
+ self->shadowSpace->registerSpace =
+ (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
+
+ return self->shadowSpace->registerSpace;
+}
+
+/* Free per-thread ShadowSpace data structures */
+void dvmSelfVerificationShadowSpaceFree(Thread* self)
+{
+ free(self->shadowSpace->registerSpace);
+ free(self->shadowSpace);
+}
+
+/*
+ * Save out PC, FP, InterpState, and registers to shadow space.
+ * Return a pointer to the shadow space for JIT to use.
+ */
+void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
+ void* interpStatePtr)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ InterpState *interpState = (InterpState *) interpStatePtr;
+ int preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
+ int postBytes = interpState->method->registersSize*4;
+
+ //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
+ // self->threadId, (int)pc, (int)fp);
+
+ if (shadowSpace->selfVerificationState != kSVSIdle) {
+ LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
+ self->threadId, shadowSpace->selfVerificationState);
+ LOGD("********** SHADOW STATE DUMP **********");
+ LOGD("* PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
+ }
+ shadowSpace->selfVerificationState = kSVSStart;
+
+ // Dynamically grow shadow register space if necessary
+ while (preBytes + postBytes > shadowSpace->registerSpaceSize) {
+ shadowSpace->registerSpaceSize *= 2;
+ free(shadowSpace->registerSpace);
+ shadowSpace->registerSpace =
+ (int*) calloc(shadowSpace->registerSpaceSize, sizeof(int));
+ }
+
+ // Remember original state
+ shadowSpace->startPC = pc;
+ shadowSpace->fp = fp;
+ shadowSpace->glue = interpStatePtr;
+ shadowSpace->shadowFP = shadowSpace->registerSpace +
+ shadowSpace->registerSpaceSize - postBytes/4;
+
+ // Create a copy of the InterpState
+ memcpy(&(shadowSpace->interpState), interpStatePtr, sizeof(InterpState));
+ shadowSpace->interpState.fp = shadowSpace->shadowFP;
+ shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;
+
+ // Create a copy of the stack
+ memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
+ preBytes+postBytes);
+
+ // Setup the shadowed heap space
+ shadowSpace->heapSpaceTail = shadowSpace->heapSpace;
+
+ // Reset trace length
+ shadowSpace->traceLength = 0;
+
+ return shadowSpace;
+}
+
+/*
+ * Save ending PC, FP and compiled code exit point to shadow space.
+ * Return a pointer to the shadow space for JIT to restore state.
+ */
+void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
+ SelfVerificationState exitPoint)
+{
+ Thread *self = dvmThreadSelf();
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ shadowSpace->endPC = pc;
+ shadowSpace->endShadowFP = fp;
+
+ //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
+ // self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
+ // (int)pc);
+
+ if (shadowSpace->selfVerificationState != kSVSStart) {
+ LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
+ self->threadId, shadowSpace->selfVerificationState);
+ LOGD("********** SHADOW STATE DUMP **********");
+ LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
+ (int)shadowSpace->endPC);
+ LOGD("* Interp FP: 0x%x", (int)shadowSpace->fp);
+ LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
+ (int)shadowSpace->endShadowFP);
+ }
+
+ // Special case when punting after a single instruction
+ if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
+ shadowSpace->selfVerificationState = kSVSIdle;
+ } else {
+ shadowSpace->selfVerificationState = exitPoint;
+ }
+
+ return shadowSpace;
+}
+
+/* Print contents of virtual registers */
+static void selfVerificationPrintRegisters(int* addr, int numWords)
+{
+ int i;
+ for (i = 0; i < numWords; i++) {
+ LOGD("* 0x%x: (v%d) 0x%8x", (int)(addr+i), i, *(addr+i));
+ }
+}
+
+/* Print values maintained in shadowSpace */
+static void selfVerificationDumpState(const u2* pc, Thread* self)
+{
+ ShadowSpace* shadowSpace = self->shadowSpace;
+ StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
+ int frameBytes = (int) shadowSpace->registerSpace +
+ shadowSpace->registerSpaceSize*4 -
+ (int) shadowSpace->shadowFP;
+ int localRegs = 0;
+ int frameBytes2 = 0;
+ if (self->curFrame < shadowSpace->fp) {
+ localRegs = (stackSave->method->registersSize -
+ stackSave->method->insSize)*4;
+ frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
+ }
+ LOGD("********** SHADOW STATE DUMP **********");
+ LOGD("* CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
+ (int)(pc - stackSave->method->insns));
+ LOGD("* Class: %s Method: %s", stackSave->method->clazz->descriptor,
+ stackSave->method->name);
+ LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
+ (int)shadowSpace->endPC);
+ LOGD("* Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
+ (int)self->curFrame);
+ LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
+ (int)shadowSpace->endShadowFP);
+ LOGD("* Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
+ localRegs, frameBytes2);
+ LOGD("* Trace length: %d State: %d", shadowSpace->traceLength,
+ shadowSpace->selfVerificationState);
+}
+
+/* Print decoded instructions in the current trace */
+static void selfVerificationDumpTrace(const u2* pc, Thread* self)
+{
+ ShadowSpace* shadowSpace = self->shadowSpace;
+ StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
+ int i;
+ u2 *addr, *offset;
+ OpCode opcode;
+
+ LOGD("********** SHADOW TRACE DUMP **********");
+ for (i = 0; i < shadowSpace->traceLength; i++) {
+ addr = (u2*) shadowSpace->trace[i].addr;
+ offset = (u2*)(addr - stackSave->method->insns);
+ opcode = (OpCode) shadowSpace->trace[i].opcode;
+ LOGD("* 0x%x: (0x%04x) %s", (int)addr, (int)offset,
+ getOpcodeName(opcode));
+ }
+}
+
+/* Manage self verification while in the debug interpreter */
+static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
+{
+ ShadowSpace *shadowSpace = self->shadowSpace;
+ OpCode opcode = *pc & 0xff;
+ SelfVerificationState state = shadowSpace->selfVerificationState;
+ //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
+ // self->threadId, (int)pc, (int)shadowSpace->endPC, state,
+ // shadowSpace->traceLength, getOpcodeName(opcode));
+
+ if (state == kSVSIdle || state == kSVSStart) {
+ LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
+ self->threadId, state);
+ selfVerificationDumpState(pc, self);
+ selfVerificationDumpTrace(pc, self);
+ }
+
+ /* Skip endPC once when trace has a backward branch */
+ if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
+ state != kSVSBackwardBranch) {
+ shadowSpace->selfVerificationState = kSVSDebugInterp;
+ }
+
+ /* Check that the current pc is the end of the trace */
+ if ((state == kSVSSingleStep || state == kSVSDebugInterp) &&
+ pc == shadowSpace->endPC) {
+
+ shadowSpace->selfVerificationState = kSVSIdle;
+
+ /* Check register space */
+ int frameBytes = (int) shadowSpace->registerSpace +
+ shadowSpace->registerSpaceSize*4 -
+ (int) shadowSpace->shadowFP;
+ if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
+ LOGD("~~~ DbgIntp(%d): REGISTERS UNEQUAL!", self->threadId);
+ selfVerificationDumpState(pc, self);
+ selfVerificationDumpTrace(pc, self);
+ LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
+ (int)shadowSpace->fp, frameBytes);
+ selfVerificationPrintRegisters((int*)shadowSpace->fp, frameBytes/4);
+ LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
+ (int)shadowSpace->shadowFP, frameBytes);
+ selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
+ frameBytes/4);
+ }
+ /* Check new frame if it exists (invokes only) */
+ if (self->curFrame < shadowSpace->fp) {
+ StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
+ int localRegs = (stackSave->method->registersSize -
+ stackSave->method->insSize)*4;
+ int frameBytes2 = (int) shadowSpace->fp -
+ (int) self->curFrame - localRegs;
+ if (memcmp(((char*)self->curFrame)+localRegs,
+ ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
+ LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) UNEQUAL!",
+ self->threadId);
+ selfVerificationDumpState(pc, self);
+ selfVerificationDumpTrace(pc, self);
+ LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
+ (int)self->curFrame, localRegs, frameBytes2);
+ selfVerificationPrintRegisters((int*)self->curFrame,
+ (frameBytes2+localRegs)/4);
+ LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
+ (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
+ selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
+ (frameBytes2+localRegs)/4);
+ }
+ }
+
+ /* Check memory space */
+ ShadowHeap* heapSpacePtr;
+ for (heapSpacePtr = shadowSpace->heapSpace;
+ heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
+ int mem_data = *((unsigned int*) heapSpacePtr->addr);
+ if (heapSpacePtr->data != mem_data) {
+ LOGD("~~~ DbgIntp(%d): MEMORY UNEQUAL!", self->threadId);
+ LOGD("* Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
+ heapSpacePtr->addr, mem_data, heapSpacePtr->data);
+ selfVerificationDumpState(pc, self);
+ selfVerificationDumpTrace(pc, self);
+ }
+ }
+ return true;
+
+ /* If the end has not been reached, make sure the max trace length is not exceeded */
+ } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
+ LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
+ LOGD("* startPC: 0x%x endPC: 0x%x currPC: 0x%x",
+ (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
+ selfVerificationDumpState(pc, self);
+ selfVerificationDumpTrace(pc, self);
+
+ return true;
+ }
+ /* Log the instruction address and opcode for debug */
+ shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
+ shadowSpace->trace[shadowSpace->traceLength].opcode = opcode;
+ shadowSpace->traceLength++;
+
+ return false;
+}
+#endif
+
int dvmJitStartup(void)
{
unsigned int i;
@@ -325,6 +604,14 @@
case kJitNormal:
switchInterp = !debugOrProfile;
break;
+#if defined(WITH_SELF_VERIFICATION)
+ case kJitSelfVerification:
+ if (selfVerificationDebugInterp(pc, self)) {
+ interpState->jitState = kJitNormal;
+ switchInterp = !debugOrProfile;
+ }
+ break;
+#endif
default:
dvmAbort();
}
@@ -584,6 +871,9 @@
case kJitSingleStepEnd:
case kJitOff:
case kJitNormal:
+#if defined(WITH_SELF_VERIFICATION)
+ case kJitSelfVerification:
+#endif
break;
default:
dvmAbort();
diff --git a/vm/interp/Jit.h b/vm/interp/Jit.h
index 660b5ec..f30a693 100644
--- a/vm/interp/Jit.h
+++ b/vm/interp/Jit.h
@@ -25,6 +25,49 @@
#define JIT_MAX_TRACE_LEN 100
+#if defined (WITH_SELF_VERIFICATION)
+
+#define REG_SPACE 256 /* default size of shadow space */
+#define HEAP_SPACE JIT_MAX_TRACE_LEN /* default size of heap space */
+
+typedef struct ShadowHeap {
+ int addr;
+ int data;
+} ShadowHeap;
+
+typedef struct InstructionTrace {
+ int addr;
+ int opcode;
+} InstructionTrace;
+
+typedef struct ShadowSpace {
+ const u2* startPC; /* starting pc of jitted region */
+ const void* fp; /* starting fp of jitted region */
+ void* glue; /* starting glue ptr of jitted region */
+ SelfVerificationState selfVerificationState; /* self verification state */
+ const u2* endPC; /* ending pc of jitted region */
+ void* shadowFP; /* pointer to fp in shadow space */
+ InterpState interpState; /* copy of interpState */
+ int* registerSpace; /* copy of register state */
+ int registerSpaceSize; /* current size of register space */
+ ShadowHeap heapSpace[HEAP_SPACE]; /* copy of heap space */
+ ShadowHeap* heapSpaceTail; /* tail pointer to heapSpace */
+ const void* endShadowFP; /* ending fp in shadow space */
+ InstructionTrace trace[JIT_MAX_TRACE_LEN]; /* opcode trace for debugging */
+ int traceLength; /* counter for current trace length */
+} ShadowSpace;
+
+/*
+ * Self verification functions.
+ */
+void* dvmSelfVerificationShadowSpaceAlloc(Thread* self);
+void dvmSelfVerificationShadowSpaceFree(Thread* self);
+void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
+ void* interpStatePtr);
+void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
+ SelfVerificationState exitPoint);
+#endif
+
/*
* JitTable hash function.
*/
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 004ee13..e5416f6 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -11,6 +11,41 @@
.align 2
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToTraceSelect
+dvmJitToTraceSelect:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToBackwardBranch
+dvmJitToBackwardBranch:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+#else
/*
* Return from the translation cache to the interpreter when the compiler is
* having issues translating/executing a Dalvik instruction. We have to skip
@@ -127,6 +162,7 @@
bl dvmJitGetCodeAddr @ Is there a translation?
cmp r0,#0
bxne r0 @ continue native execution if so
+#endif
/*
* No translation, restore interpreter regs and start interpreting.
@@ -174,13 +210,61 @@
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
beq common_selectTrace
+#if !defined(WITH_SELF_VERIFICATION)
bxne r0 @ jump to the translation
+#else
+ bne dvmJitSelfVerificationStart @ set up self verification
+#endif
common_selectTrace:
mov r2,#kJitTSelectRequest @ ask for trace selection
str r2,[rGLUE,#offGlue_jitState]
mov r1,#1 @ set changeInterp
b common_gotoBail
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ */
+dvmJitSelfVerificationStart:
+ sub sp,sp,#4 @ allocate stack space
+ str r0,[sp,#0] @ save out dvmJitGetCodeAddr(rPC)
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rGLUE @ r2<- InterpState pointer
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#20] @ rFP<- fp in shadow space
+ add rGLUE,r0,#24 @ rGLUE<- pointer in shadow space
+ ldr r0,[sp,#0] @ r0<- dvmJitGetCodeAddr(rPC)
+ add sp,sp,#4 @ restore stack
+ bx r0 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+dvmJitSelfVerificationEnd:
+ mov r1,rFP @ pass ending fp
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ ldr rPC,[r0,#0] @ restore PC
+ ldr rFP,[r0,#4] @ restore FP
+ ldr rGLUE,[r0,#8] @ restore InterpState
+ ldr r1,[r0,#12] @ get exitPoint
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rGLUE,#offGlue_jitState]
+ mov r1,#1 @ set changeInterp
+ b common_gotoBail
+
+1: @ exit to interpreter without check
+ EXPORT_PC()
+ adrl rIBASE, dvmAsmInstructionStart
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
#endif
/*
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index 716df3d..3e5692b 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -239,6 +239,19 @@
MTERP_CONSTANT(kJitTSelectEnd, 5)
MTERP_CONSTANT(kJitSingleStep, 6)
MTERP_CONSTANT(kJitSingleStepEnd, 7)
+MTERP_CONSTANT(kJitSelfVerification, 8)
+
+#if defined(WITH_SELF_VERIFICATION)
+MTERP_CONSTANT(kSVSIdle, 0)
+MTERP_CONSTANT(kSVSStart, 1)
+MTERP_CONSTANT(kSVSPunt, 2)
+MTERP_CONSTANT(kSVSSingleStep, 3)
+MTERP_CONSTANT(kSVSTraceSelect, 4)
+MTERP_CONSTANT(kSVSNormal, 5)
+MTERP_CONSTANT(kSVSNoChain, 6)
+MTERP_CONSTANT(kSVSBackwardBranch, 7)
+MTERP_CONSTANT(kSVSDebugInterp, 8)
+#endif
#endif
/* ClassStatus enumeration */
diff --git a/vm/mterp/out/InterpAsm-armv4t.S b/vm/mterp/out/InterpAsm-armv4t.S
index b0aa69b..757509e 100644
--- a/vm/mterp/out/InterpAsm-armv4t.S
+++ b/vm/mterp/out/InterpAsm-armv4t.S
@@ -9483,6 +9483,41 @@
.align 2
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToTraceSelect
+dvmJitToTraceSelect:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToBackwardBranch
+dvmJitToBackwardBranch:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+#else
/*
* Return from the translation cache to the interpreter when the compiler is
* having issues translating/executing a Dalvik instruction. We have to skip
@@ -9599,6 +9634,7 @@
bl dvmJitGetCodeAddr @ Is there a translation?
cmp r0,#0
bxne r0 @ continue native execution if so
+#endif
/*
* No translation, restore interpreter regs and start interpreting.
@@ -9646,13 +9682,61 @@
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
beq common_selectTrace
+#if !defined(WITH_SELF_VERIFICATION)
bxne r0 @ jump to the translation
+#else
+ bne dvmJitSelfVerificationStart @ set up self verification
+#endif
common_selectTrace:
mov r2,#kJitTSelectRequest @ ask for trace selection
str r2,[rGLUE,#offGlue_jitState]
mov r1,#1 @ set changeInterp
b common_gotoBail
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ */
+dvmJitSelfVerificationStart:
+ sub sp,sp,#4 @ allocate stack space
+ str r0,[sp,#0] @ save out dvmJitGetCodeAddr(rPC)
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rGLUE @ r2<- InterpState pointer
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#20] @ rFP<- fp in shadow space
+ add rGLUE,r0,#24 @ rGLUE<- pointer in shadow space
+ ldr r0,[sp,#0] @ r0<- dvmJitGetCodeAddr(rPC)
+ add sp,sp,#4 @ restore stack
+ bx r0 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+dvmJitSelfVerificationEnd:
+ mov r1,rFP @ pass ending fp
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ ldr rPC,[r0,#0] @ restore PC
+ ldr rFP,[r0,#4] @ restore FP
+ ldr rGLUE,[r0,#8] @ restore InterpState
+ ldr r1,[r0,#12] @ get exitPoint
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rGLUE,#offGlue_jitState]
+ mov r1,#1 @ set changeInterp
+ b common_gotoBail
+
+1: @ exit to interpreter without check
+ EXPORT_PC()
+ adrl rIBASE, dvmAsmInstructionStart
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
#endif
/*
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 5a8ae4c..79e319a 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -9001,6 +9001,41 @@
.align 2
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToTraceSelect
+dvmJitToTraceSelect:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToBackwardBranch
+dvmJitToBackwardBranch:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+#else
/*
* Return from the translation cache to the interpreter when the compiler is
* having issues translating/executing a Dalvik instruction. We have to skip
@@ -9117,6 +9152,7 @@
bl dvmJitGetCodeAddr @ Is there a translation?
cmp r0,#0
bxne r0 @ continue native execution if so
+#endif
/*
* No translation, restore interpreter regs and start interpreting.
@@ -9164,13 +9200,61 @@
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
beq common_selectTrace
+#if !defined(WITH_SELF_VERIFICATION)
bxne r0 @ jump to the translation
+#else
+ bne dvmJitSelfVerificationStart @ set up self verification
+#endif
common_selectTrace:
mov r2,#kJitTSelectRequest @ ask for trace selection
str r2,[rGLUE,#offGlue_jitState]
mov r1,#1 @ set changeInterp
b common_gotoBail
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ */
+dvmJitSelfVerificationStart:
+ sub sp,sp,#4 @ allocate stack space
+ str r0,[sp,#0] @ save out dvmJitGetCodeAddr(rPC)
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rGLUE @ r2<- InterpState pointer
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#20] @ rFP<- fp in shadow space
+ add rGLUE,r0,#24 @ rGLUE<- pointer in shadow space
+ ldr r0,[sp,#0] @ r0<- dvmJitGetCodeAddr(rPC)
+ add sp,sp,#4 @ restore stack
+ bx r0 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+dvmJitSelfVerificationEnd:
+ mov r1,rFP @ pass ending fp
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ ldr rPC,[r0,#0] @ restore PC
+ ldr rFP,[r0,#4] @ restore FP
+ ldr rGLUE,[r0,#8] @ restore InterpState
+ ldr r1,[r0,#12] @ get exitPoint
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rGLUE,#offGlue_jitState]
+ mov r1,#1 @ set changeInterp
+ b common_gotoBail
+
+1: @ exit to interpreter without check
+ EXPORT_PC()
+ adrl rIBASE, dvmAsmInstructionStart
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
#endif
/*
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 3f2069c..b8d66d2 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -9477,6 +9477,41 @@
.align 2
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToTraceSelect
+dvmJitToTraceSelect:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToBackwardBranch
+dvmJitToBackwardBranch:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+#else
/*
* Return from the translation cache to the interpreter when the compiler is
* having issues translating/executing a Dalvik instruction. We have to skip
@@ -9593,6 +9628,7 @@
bl dvmJitGetCodeAddr @ Is there a translation?
cmp r0,#0
bxne r0 @ continue native execution if so
+#endif
/*
* No translation, restore interpreter regs and start interpreting.
@@ -9640,13 +9676,61 @@
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
beq common_selectTrace
+#if !defined(WITH_SELF_VERIFICATION)
bxne r0 @ jump to the translation
+#else
+ bne dvmJitSelfVerificationStart @ set up self verification
+#endif
common_selectTrace:
mov r2,#kJitTSelectRequest @ ask for trace selection
str r2,[rGLUE,#offGlue_jitState]
mov r1,#1 @ set changeInterp
b common_gotoBail
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ */
+dvmJitSelfVerificationStart:
+ sub sp,sp,#4 @ allocate stack space
+ str r0,[sp,#0] @ save out dvmJitGetCodeAddr(rPC)
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rGLUE @ r2<- InterpState pointer
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#20] @ rFP<- fp in shadow space
+ add rGLUE,r0,#24 @ rGLUE<- interpState copy in shadow space
+ ldr r0,[sp,#0] @ r0<- dvmJitGetCodeAddr(rPC)
+ add sp,sp,#4 @ restore stack
+ bx r0 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+dvmJitSelfVerificationEnd:
+ mov r1,rFP @ pass ending fp
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ ldr rPC,[r0,#0] @ restore PC
+ ldr rFP,[r0,#4] @ restore FP
+ ldr rGLUE,[r0,#8] @ restore InterpState
+ ldr r1,[r0,#12] @ get exitPoint
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rGLUE,#offGlue_jitState]
+ mov r1,#1 @ set changeInterp
+ b common_gotoBail
+
+1: @ exitPoint==0: resume interpreting, skip self-verification replay
+ EXPORT_PC()
+ adrl rIBASE, dvmAsmInstructionStart
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
#endif
/*
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 0555a30..b09b1c1 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -8937,6 +8937,41 @@
.align 2
#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToTraceSelect
+dvmJitToTraceSelect:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToBackwardBranch
+dvmJitToBackwardBranch:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[r14, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ b dvmJitSelfVerificationEnd @ doesn't return
+#else
/*
* Return from the translation cache to the interpreter when the compiler is
* having issues translating/executing a Dalvik instruction. We have to skip
@@ -9053,6 +9088,7 @@
bl dvmJitGetCodeAddr @ Is there a translation?
cmp r0,#0
bxne r0 @ continue native execution if so
+#endif
/*
* No translation, restore interpreter regs and start interpreting.
@@ -9100,13 +9136,61 @@
bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
cmp r0,#0
beq common_selectTrace
+#if !defined(WITH_SELF_VERIFICATION)
bxne r0 @ jump to the translation
+#else
+ bne dvmJitSelfVerificationStart @ set up self verification
+#endif
common_selectTrace:
mov r2,#kJitTSelectRequest @ ask for trace selection
str r2,[rGLUE,#offGlue_jitState]
mov r1,#1 @ set changeInterp
b common_gotoBail
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ */
+dvmJitSelfVerificationStart:
+ sub sp,sp,#4 @ allocate stack space
+ str r0,[sp,#0] @ save out dvmJitGetCodeAddr(rPC)
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rGLUE @ r2<- InterpState pointer
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#20] @ rFP<- fp in shadow space
+ add rGLUE,r0,#24 @ rGLUE<- interpState copy in shadow space
+ ldr r0,[sp,#0] @ r0<- dvmJitGetCodeAddr(rPC)
+ add sp,sp,#4 @ restore stack
+ bx r0 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+dvmJitSelfVerificationEnd:
+ mov r1,rFP @ pass ending fp
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ ldr rPC,[r0,#0] @ restore PC
+ ldr rFP,[r0,#4] @ restore FP
+ ldr rGLUE,[r0,#8] @ restore InterpState
+ ldr r1,[r0,#12] @ get exitPoint
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rGLUE,#offGlue_jitState]
+ mov r1,#1 @ set changeInterp
+ b common_gotoBail
+
+1: @ exitPoint==0: resume interpreting, skip self-verification replay
+ EXPORT_PC()
+ adrl rIBASE, dvmAsmInstructionStart
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
#endif
/*
diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c
index 90b2a75..6d6d16d 100644
--- a/vm/mterp/out/InterpC-portdbg.c
+++ b/vm/mterp/out/InterpC-portdbg.c
@@ -1498,13 +1498,22 @@
/* Check to see if we've got a trace selection request. If we do,
* but something is amiss, revert to the fast interpreter.
*/
+#if !defined(WITH_SELF_VERIFICATION)
if (dvmJitCheckTraceRequest(self,interpState)) {
interpState->nextMode = INTERP_STD;
//LOGD("** something wrong, exiting\n");
return true;
}
-#endif
-#endif
+#else
+ if (interpState->jitState != kJitSelfVerification &&
+ dvmJitCheckTraceRequest(self,interpState)) {
+ interpState->nextMode = INTERP_STD;
+ //LOGD("** something wrong, exiting\n");
+ return true;
+ }
+#endif /* WITH_SELF_VERIFICATION */
+#endif /* INTERP_TYPE == INTERP_DBG */
+#endif /* WITH_JIT */
/* copy state in */
curMethod = interpState->method;
diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c
index c28bf5b..2476102 100644
--- a/vm/mterp/out/InterpC-portstd.c
+++ b/vm/mterp/out/InterpC-portstd.c
@@ -1212,13 +1212,22 @@
/* Check to see if we've got a trace selection request. If we do,
* but something is amiss, revert to the fast interpreter.
*/
+#if !defined(WITH_SELF_VERIFICATION)
if (dvmJitCheckTraceRequest(self,interpState)) {
interpState->nextMode = INTERP_STD;
//LOGD("** something wrong, exiting\n");
return true;
}
-#endif
-#endif
+#else
+ if (interpState->jitState != kJitSelfVerification &&
+ dvmJitCheckTraceRequest(self,interpState)) {
+ interpState->nextMode = INTERP_STD;
+ //LOGD("** something wrong, exiting\n");
+ return true;
+ }
+#endif /* WITH_SELF_VERIFICATION */
+#endif /* INTERP_TYPE == INTERP_DBG */
+#endif /* WITH_JIT */
/* copy state in */
curMethod = interpState->method;
diff --git a/vm/mterp/portable/entry.c b/vm/mterp/portable/entry.c
index 9c7c2d6..f0e63f1 100644
--- a/vm/mterp/portable/entry.c
+++ b/vm/mterp/portable/entry.c
@@ -47,13 +47,22 @@
/* Check to see if we've got a trace selection request. If we do,
* but something is amiss, revert to the fast interpreter.
*/
+#if !defined(WITH_SELF_VERIFICATION)
if (dvmJitCheckTraceRequest(self,interpState)) {
interpState->nextMode = INTERP_STD;
//LOGD("** something wrong, exiting\n");
return true;
}
-#endif
-#endif
+#else
+ if (interpState->jitState != kJitSelfVerification &&
+ dvmJitCheckTraceRequest(self,interpState)) {
+ interpState->nextMode = INTERP_STD;
+ //LOGD("** something wrong, exiting\n");
+ return true;
+ }
+#endif /* WITH_SELF_VERIFICATION */
+#endif /* INTERP_TYPE == INTERP_DBG */
+#endif /* WITH_JIT */
/* copy state in */
curMethod = interpState->method;