Fix for the JIT blocking mode plus some code cleanup.
Bug: 2517606
Change-Id: I2b5aa92ceaf23d484329330ae20de5966704280b
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index 2256c47..efbe0a5 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -112,12 +112,14 @@
return result;
}
-/* Block until queue length is 0 */
+/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
- int oldStatus = dvmChangeStatus(NULL, THREAD_VMWAIT);
+ Thread *self = dvmThreadSelf();
+
dvmLockMutex(&gDvmJit.compilerLock);
- while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread) {
+ while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
+ self->suspendCount == 0) {
/*
* Use timed wait here - more than one mutator threads may be blocked
* but the compiler thread will only signal once when the queue is
@@ -127,7 +129,6 @@
dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock, 1000, 0);
}
dvmUnlockMutex(&gDvmJit.compilerLock);
- dvmChangeStatus(NULL, oldStatus);
}
bool dvmCompilerSetupCodeCache(void)
@@ -577,8 +578,15 @@
/*
* Check whether there is a suspend request on me. This
* is necessary to allow a clean shutdown.
+ *
+ * However, in the blocking stress testing mode, let the
+ * compiler thread continue doing compilations to unblock
+ * other requesting threads. This may occasionally prevent
+ * shutdown from proceeding cleanly in the standalone invocation
+ * of the vm but this should be acceptable.
*/
- dvmCheckSuspendPending(NULL);
+ if (!gDvmJit.blockingMode)
+ dvmCheckSuspendPending(NULL);
/* Is JitTable filling up? */
if (gDvmJit.jitTableEntriesUsed >
(gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
diff --git a/vm/compiler/CompilerIR.h b/vm/compiler/CompilerIR.h
index 00a45f9..2bf243d 100644
--- a/vm/compiler/CompilerIR.h
+++ b/vm/compiler/CompilerIR.h
@@ -184,6 +184,12 @@
const u2 *switchOverflowPad;
} CompilationUnit;
+#if defined(WITH_SELF_VERIFICATION)
+#define HEAP_ACCESS_SHADOW(_state) cUnit->heapMemOp = _state
+#else
+#define HEAP_ACCESS_SHADOW(_state)
+#endif
+
BasicBlock *dvmCompilerNewBB(BBType blockType);
void dvmCompilerAppendMIR(BasicBlock *bb, MIR *mir);
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index 79a9fb3..11d8268 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -275,13 +275,11 @@
NULL);/* null object? */
opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
dvmCompilerFreeTemp(cUnit, regPtr);
storeValueWide(cUnit, rlDest, rlResult);
}
@@ -299,13 +297,11 @@
NULL);/* null object? */
regPtr = dvmCompilerAllocTemp(cUnit);
opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
dvmCompilerFreeTemp(cUnit, regPtr);
}
@@ -325,14 +321,12 @@
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
NULL);/* null object? */
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
size, rlObj.sRegLow);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
storeValue(cUnit, rlDest, rlResult);
}
@@ -351,13 +345,10 @@
int regPtr;
genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
NULL);/* null object? */
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, size);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
}
@@ -408,25 +399,21 @@
opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
}
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
dvmCompilerFreeTemp(cUnit, regPtr);
storeValueWide(cUnit, rlDest, rlResult);
} else {
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg,
scale, size);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
dvmCompilerFreeTemp(cUnit, regPtr);
storeValue(cUnit, rlDest, rlResult);
}
@@ -489,24 +476,19 @@
opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
}
rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
dvmCompilerFreeTemp(cUnit, regPtr);
} else {
rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
scale, size);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
}
}
@@ -585,14 +567,10 @@
target->defMask = ENCODE_ALL;
branchOver->generic.target = (LIR *) target;
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+ HEAP_ACCESS_SHADOW(true);
storeBaseIndexed(cUnit, regPtr, regIndex, r0,
scale, kWord);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
}
static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir,
@@ -1529,13 +1507,11 @@
rlDest = dvmCompilerGetDest(cUnit, mir, 0);
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
storeValue(cUnit, rlDest, rlResult);
break;
}
@@ -1548,13 +1524,11 @@
rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
storeValueWide(cUnit, rlDest, rlResult);
break;
}
@@ -1573,13 +1547,11 @@
rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storeWordDisp(cUnit, tReg, 0 ,rlSrc.lowReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
+
break;
}
case OP_SPUT_WIDE: {
@@ -1592,13 +1564,10 @@
rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = true;
-#endif
+
+ HEAP_ACCESS_SHADOW(true);
storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
-#if defined(WITH_SELF_VERIFICATION)
- cUnit->heapMemOp = false;
-#endif
+ HEAP_ACCESS_SHADOW(false);
break;
}
case OP_NEW_INSTANCE: {
@@ -1657,7 +1626,7 @@
* so that we can tell if it happens frequently.
*/
if (classPtr == NULL) {
- LOGD("null clazz in OP_CHECK_CAST, single-stepping");
+ LOGVV("null clazz in OP_CHECK_CAST, single-stepping");
genInterpSingleStep(cUnit, mir);
return false;
}
@@ -3251,9 +3220,14 @@
static void handleNormalChainingCell(CompilationUnit *cUnit,
unsigned int offset)
{
- loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
- jitToInterpEntries.dvmJitToInterpNormal), r0);
- opReg(cUnit, kOpBlx, r0);
+ /*
+ * Use raw instruction constructors to guarantee that the generated
+ * instructions fit the predefined cell size.
+ */
+ newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
+ offsetof(InterpState,
+ jitToInterpEntries.dvmJitToInterpNormal) >> 2);
+ newLIR1(cUnit, kThumbBlxR, r0);
addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
}
@@ -3264,9 +3238,14 @@
static void handleHotChainingCell(CompilationUnit *cUnit,
unsigned int offset)
{
- loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
- jitToInterpEntries.dvmJitToInterpTraceSelect), r0);
- opReg(cUnit, kOpBlx, r0);
+ /*
+ * Use raw instruction constructors to guarantee that the generated
+ * instructions fit the predefined cell size.
+ */
+ newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
+ offsetof(InterpState,
+ jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
+ newLIR1(cUnit, kThumbBlxR, r0);
addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
}
@@ -3275,6 +3254,10 @@
static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
unsigned int offset)
{
+ /*
+ * Use raw instruction constructors to guarantee that the generated
+ * instructions fit the predefined cell size.
+ */
#if defined(WITH_SELF_VERIFICATION)
newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
offsetof(InterpState,
@@ -3292,9 +3275,14 @@
static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
const Method *callee)
{
- loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
- jitToInterpEntries.dvmJitToInterpTraceSelect), r0);
- opReg(cUnit, kOpBlx, r0);
+ /*
+ * Use raw instruction constructors to guarantee that the generated
+ * instructions fit the predefined cell size.
+ */
+ newLIR3(cUnit, kThumbLdrRRI5, r0, rGLUE,
+ offsetof(InterpState,
+ jitToInterpEntries.dvmJitToInterpTraceSelect) >> 2);
+ newLIR1(cUnit, kThumbBlxR, r0);
addWordData(cUnit, (int) (callee->insns), true);
}
diff --git a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
index aa343aa..7f9fa3b 100644
--- a/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te-vfp/ArchVariant.c
@@ -57,6 +57,7 @@
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking mode */
gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
#endif
/* Codegen-specific assumptions */
@@ -70,11 +71,11 @@
assert(sizeof(StackSaveArea) < 236);
/*
- * EA is calculated by doing "Rn + imm5 << 2", and there are 5 entry points
- * that codegen may access, make sure that the offset from the top of the
- * struct is less than 108.
+ * EA is calculated by doing "Rn + imm5 << 2", so make sure the
+ * jitToInterpEntries field ends within 128 bytes of the struct start.
*/
- assert(offsetof(InterpState, jitToInterpEntries) < 108);
+ assert((offsetof(InterpState, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) <= 128);
return true;
}
diff --git a/vm/compiler/codegen/arm/armv5te/ArchVariant.c b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
index d33debe..e018ea1 100644
--- a/vm/compiler/codegen/arm/armv5te/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv5te/ArchVariant.c
@@ -57,6 +57,7 @@
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking mode */
gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
#endif
/* Codegen-specific assumptions */
@@ -70,11 +71,11 @@
assert(sizeof(StackSaveArea) < 236);
/*
- * EA is calculated by doing "Rn + imm5 << 2", and there are 5 entry points
- * that codegen may access, make sure that the offset from the top of the
- * struct is less than 108.
+ * EA is calculated by doing "Rn + imm5 << 2", so make sure the
+ * jitToInterpEntries field ends within 128 bytes of the struct start.
*/
- assert(offsetof(InterpState, jitToInterpEntries) < 108);
+ assert((offsetof(InterpState, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) <= 128);
return true;
}
diff --git a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
index fe2b1d4..5a14774 100644
--- a/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a-neon/ArchVariant.c
@@ -52,6 +52,7 @@
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking */
gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
#endif
/* Codegen-specific assumptions */
@@ -65,11 +66,11 @@
assert(sizeof(StackSaveArea) < 236);
/*
- * EA is calculated by doing "Rn + imm5 << 2", and there are 5 entry points
- * that codegen may access, make sure that the offset from the top of the
- * struct is less than 108.
+ * EA is calculated by doing "Rn + imm5 << 2", so make sure the
+ * jitToInterpEntries field ends within 128 bytes of the struct start.
*/
- assert(offsetof(InterpState, jitToInterpEntries) < 108);
+ assert((offsetof(InterpState, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) <= 128);
return true;
}
diff --git a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
index fe2b1d4..5a14774 100644
--- a/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
+++ b/vm/compiler/codegen/arm/armv7-a/ArchVariant.c
@@ -52,6 +52,7 @@
#if defined(WITH_SELF_VERIFICATION)
/* Force into blocking */
gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
#endif
/* Codegen-specific assumptions */
@@ -65,11 +66,11 @@
assert(sizeof(StackSaveArea) < 236);
/*
- * EA is calculated by doing "Rn + imm5 << 2", and there are 5 entry points
- * that codegen may access, make sure that the offset from the top of the
- * struct is less than 108.
+ * EA is calculated by doing "Rn + imm5 << 2", so make sure the
+ * jitToInterpEntries field ends within 128 bytes of the struct start.
*/
- assert(offsetof(InterpState, jitToInterpEntries) < 108);
+ assert((offsetof(InterpState, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) <= 128);
return true;
}
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index 8d1c8c2..b93eee3 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -60,6 +60,11 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
mov r2, #0
str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 9c1e788..d8a2784 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -1501,6 +1501,11 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
mov r2, #0
str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index e73d940..eab49cf 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -1229,6 +1229,11 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
mov r2, #0
str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 51a5004..cb00858 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -1501,6 +1501,11 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
mov r2, #0
str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 737ea5f..cf6fff9 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -1501,6 +1501,11 @@
* r0 Faulting Dalvik PC
*/
.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
mov r2, #0
str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land