Free memory associated with the trace descriptor of ALL dropped JIT work orders
To further reduce memory consumption, only enable the method-vs-trace
compile-size statistics when WITH_JIT_TUNING is configured.
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index e01ca89..032b859 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -53,6 +53,9 @@
* Attempt to enqueue a work order, returning true if successful.
* This routine will not block, but simply return if it couldn't
* aquire the lock or if the queue is full.
+ *
+ * NOTE: Make sure that the caller frees the info pointer if the return value
+ * is false.
*/
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
@@ -62,11 +65,6 @@
bool result = true;
if (dvmTryLockMutex(&gDvmJit.compilerLock)) {
- /*
- * Make sure the memory associated with the info pointer is freed for
- * dropped work orders.
- */
- free(info);
return false; // Couldn't acquire the lock
}
@@ -315,8 +313,10 @@
dvmLockMutex(&gDvmJit.compilerLock);
+#if defined(WITH_JIT_TUNING)
/* Track method-level compilation statistics */
gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);
+#endif
dvmUnlockMutex(&gDvmJit.compilerLock);
diff --git a/vm/compiler/Frontend.c b/vm/compiler/Frontend.c
index c8e8e52..0c8a8df 100644
--- a/vm/compiler/Frontend.c
+++ b/vm/compiler/Frontend.c
@@ -191,6 +191,7 @@
return (int) m1->method - (int) m2->method;
}
+#if defined(WITH_JIT_TUNING)
/*
* Analyze each method whose traces are ever compiled. Collect a variety of
* statistics like the ratio of exercised vs overall code and code bloat
@@ -247,6 +248,7 @@
realMethodEntry->dalvikSize = insnSize * 2;
return realMethodEntry;
}
+#endif
/*
* Crawl the stack of the thread that requesed compilation to see if any of the
@@ -297,7 +299,9 @@
int numBlocks = 0;
static int compilationId;
CompilationUnit cUnit;
+#if defined(WITH_JIT_TUNING)
CompilerMethodStats *methodStats;
+#endif
/* If we've already compiled this trace, just return success */
if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) {
@@ -307,8 +311,10 @@
compilationId++;
memset(&cUnit, 0, sizeof(CompilationUnit));
+#if defined(WITH_JIT_TUNING)
/* Locate the entry to store compilation statistics for this method */
methodStats = analyzeMethodBody(desc->method);
+#endif
/* Initialize the printMe flag */
cUnit.printMe = gDvmJit.printMe;
@@ -449,8 +455,10 @@
}
}
+#if defined(WITH_JIT_TUNING)
/* Convert # of half-word to bytes */
methodStats->compiledDalvikSize += traceSize * 2;
+#endif
/*
* Now scan basic blocks containing real code to connect the
@@ -745,7 +753,9 @@
/* Success */
if (!cUnit.halveInstCount) {
+#if defined(WITH_JIT_TUNING)
methodStats->nativeSize += cUnit.totalSize;
+#endif
return info->codeAddress != NULL;
/* Halve the instruction count and retry again */
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index 4949b9c..ec39c8a 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -221,6 +221,10 @@
JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC);
if (desc) {
dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
+ /*
+ * This function effectively terminates the VM right here, so not
+ * freeing the desc pointer when the enqueuing fails is acceptable.
+ */
}
gDvmJit.selfVerificationSpin = true;
while(gDvmJit.selfVerificationSpin) sleep(10);
@@ -722,6 +726,12 @@
if (gDvmJit.blockingMode) {
dvmCompilerDrainQueue();
}
+ } else {
+ /*
+ * Make sure the descriptor for the abandoned work order is
+ * freed.
+ */
+ free(desc);
}
/*
* Reset "trace in progress" flag whether or not we