Move finalization out of the VM.
This change introduces a new reference class whose referents
are the instances that require finalization. Finalization is
now performed with a reference queue and a dedicated thread
that removes references from the queue and finalizes their
referents.
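
The managed half of this scheme (java.lang.ref.FinalizerReference and the
thread that drains its queue) lives in the core libraries and is not part
of this diff. As a rough user-level analogue of the pattern (one reference
per tracked object, a ReferenceQueue the GC delivers dead references to,
and a dedicated thread that removes and processes them), a sketch along
these lines may help; the class and method names here are hypothetical and
this is not the libcore implementation:

    import java.lang.ref.PhantomReference;
    import java.lang.ref.ReferenceQueue;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    /* User-level analogue: one reference is created per tracked object, the
     * GC enqueues it on a ReferenceQueue when the referent dies, and a
     * dedicated daemon thread removes and processes queued references. */
    final class CleanerReference extends PhantomReference<Object> {
        private static final ReferenceQueue<Object> QUEUE =
                new ReferenceQueue<Object>();
        /* Keep the references themselves reachable until processed. */
        private static final Set<CleanerReference> LIVE =
                Collections.synchronizedSet(new HashSet<CleanerReference>());

        private final Runnable action;

        private CleanerReference(Object referent, Runnable action) {
            super(referent, QUEUE);
            this.action = action;
        }

        /* Analogue of FinalizerReference.add(Object): register an object. */
        static void add(Object referent, Runnable action) {
            LIVE.add(new CleanerReference(referent, action));
        }

        /* Analogue of the dedicated finalizer thread. */
        static void startDaemon() {
            Thread daemon = new Thread() {
                @Override public void run() {
                    for (;;) {
                        try {
                            CleanerReference ref =
                                    (CleanerReference) QUEUE.remove();
                            LIVE.remove(ref);
                            ref.action.run();
                        } catch (InterruptedException ignored) {
                            return;
                        }
                    }
                }
            };
            daemon.setDaemon(true);
            daemon.start();
        }
    }

The real FinalizerReference differs in that it subclasses Reference
directly and the VM hands the dead referent back to the managed side:
enqueueFinalizerReferences() below blackens the white referent and stores
it in pendingNext before clearing the referent field, so the managed
thread can still call finalize() on it. Objects are registered by
dvmSetFinalizable() through the add() entry point resolved in InitRefs.c.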
Change-Id: I0ff6dd272f00ca08c6ed3aa667bf766a039a944e
diff --git a/vm/Globals.h b/vm/Globals.h
index ad28e14..5287dad 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -285,6 +285,7 @@
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationFactory;
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMember;
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMemberArray;
+ ClassObject* classJavaLangRefFinalizerReference;
/*
* classes representing exception types. The names here don't include
@@ -337,7 +338,6 @@
int voffJavaLangObject_equals;
int voffJavaLangObject_hashCode;
int voffJavaLangObject_toString;
- int voffJavaLangObject_finalize;
/* field offsets - Class */
int offJavaLangClass_pd;
@@ -390,6 +390,9 @@
/* method pointers - java.lang.ref.Reference */
Method* methJavaLangRefReference_enqueueInternal;
+ /* more method pointers - java.lang.ref.FinalizerReference */
+ Method* methJavaLangRefFinalizerReferenceAdd;
+
/* constructor method pointers; no vtable involved, so use Method* */
Method* methJavaLangStackTraceElement_init;
Method* methJavaLangReflectConstructor_init;
diff --git a/vm/InitRefs.c b/vm/InitRefs.c
index 137a481..953db8e 100644
--- a/vm/InitRefs.c
+++ b/vm/InitRefs.c
@@ -340,6 +340,9 @@
"Lorg/apache/harmony/lang/annotation/AnnotationFactory;", "createAnnotation",
"(Ljava/lang/Class;[Lorg/apache/harmony/lang/annotation/AnnotationMember;)"
"Ljava/lang/annotation/Annotation;" },
+ { &gDvm.methodTraceClassPrepMethod, "Ldalvik/system/VMDebug;", "startClassPrep", "()V" },
+ { &gDvm.methJavaLangRefFinalizerReferenceAdd,
+ "Ljava/lang/ref/FinalizerReference;", "add", "(Ljava/lang/Object;)V" },
{ NULL, NULL, NULL, NULL }
};
@@ -386,7 +389,6 @@
"(Ljava/lang/String;)Ljava/lang/Class;" },
{ &gDvm.voffJavaLangObject_equals, "Ljava/lang/Object;", "equals",
"(Ljava/lang/Object;)Z" },
- { &gDvm.voffJavaLangObject_finalize, "Ljava/lang/Object;", "finalize", "()V" },
{ &gDvm.voffJavaLangObject_hashCode, "Ljava/lang/Object;", "hashCode", "()I" },
{ &gDvm.voffJavaLangObject_toString, "Ljava/lang/Object;", "toString",
"()Ljava/lang/String;" },
@@ -407,6 +409,13 @@
return true;
}
+static bool initFinalizerReference()
+{
+ gDvm.classJavaLangRefFinalizerReference =
+ dvmFindSystemClass("Ljava/lang/ref/FinalizerReference;");
+ return gDvm.classJavaLangRefFinalizerReference != NULL;
+}
+
static bool verifyStringOffset(const char* name, int actual, int expected) {
if (actual != expected) {
LOGE("InitRefs: String.%s offset = %d; expected %d\n", name, actual, expected);
@@ -447,6 +456,7 @@
&& initConstructorReferences()
&& initDirectMethodReferences()
&& initVirtualMethodOffsets()
+ && initFinalizerReference()
&& verifyStringOffsets();
}
@@ -465,7 +475,7 @@
/* Note: enqueueInternal() is private and thus a direct method. */
return initFieldOffset(classReference, &gDvm.offJavaLangRefReference_pendingNext,
- "pendingNext", "Ljava/lang/ref/Reference;")
+ "pendingNext", "Ljava/lang/Object;")
&& initFieldOffset(classReference, &gDvm.offJavaLangRefReference_queue,
"queue", "Ljava/lang/ref/ReferenceQueue;")
&& initFieldOffset(classReference, &gDvm.offJavaLangRefReference_queueNext,
diff --git a/vm/Thread.c b/vm/Thread.c
index eb026fd..33d6599 100644
--- a/vm/Thread.c
+++ b/vm/Thread.c
@@ -1311,23 +1311,6 @@
assert(threadObj != NULL);
- if(gDvm.zygote) {
- // Allow the sampling profiler thread. We shut it down before forking.
- StringObject* nameStr = (StringObject*) dvmGetFieldObject(threadObj,
- gDvm.offJavaLangThread_name);
- char* threadName = dvmCreateCstrFromString(nameStr);
- bool profilerThread = strcmp(threadName, "SamplingProfiler") == 0;
- if (!profilerThread) {
- dvmThrowExceptionFmt(gDvm.exIllegalStateException,
- "No new threads in -Xzygote mode. "
- "Found thread named '%s'", threadName);
-
- free(threadName);
- goto fail;
- }
- free(threadName);
- }
-
self = dvmThreadSelf();
if (reqStackSize == 0)
stackSize = gDvm.stackSize;
diff --git a/vm/alloc/Alloc.c b/vm/alloc/Alloc.c
index 8269290..370ac35 100644
--- a/vm/alloc/Alloc.c
+++ b/vm/alloc/Alloc.c
@@ -169,6 +169,7 @@
{
Object* newObj;
+ assert(clazz != NULL);
assert(dvmIsClassInitialized(clazz) || dvmIsClassInitializing(clazz));
/* allocate on GC heap; memory is zeroed out */
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
index c0ef8fb..1a38a76 100644
--- a/vm/alloc/Heap.c
+++ b/vm/alloc/Heap.c
@@ -104,8 +104,6 @@
* and reference objects.
*/
dvmInitMutex(&gDvm.heapWorkerListLock);
- gcHeap->finalizableRefs = NULL;
- gcHeap->pendingFinalizationRefs = NULL;
gcHeap->referenceOperations = NULL;
if (!dvmCardTableStartup(gDvm.heapMaximumSize)) {
@@ -135,12 +133,6 @@
* cleaned up explicitly. The process may stick around, so we
* don't want to leak any native memory.
*/
- dvmHeapFreeLargeTable(gDvm.gcHeap->finalizableRefs);
- gDvm.gcHeap->finalizableRefs = NULL;
-
- dvmHeapFreeLargeTable(gDvm.gcHeap->pendingFinalizationRefs);
- gDvm.gcHeap->pendingFinalizationRefs = NULL;
-
dvmHeapFreeLargeTable(gDvm.gcHeap->referenceOperations);
gDvm.gcHeap->referenceOperations = NULL;
@@ -191,27 +183,15 @@
*
* Typically only called by the heap worker thread.
*/
-Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
+Object *dvmGetNextHeapWorkerObject()
{
Object *obj;
GcHeap *gcHeap = gDvm.gcHeap;
- assert(op != NULL);
-
dvmLockMutex(&gDvm.heapWorkerListLock);
obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
if (obj != NULL) {
- *op = WORKER_ENQUEUE;
- } else {
- obj = dvmHeapGetNextObjectFromLargeTable(
- &gcHeap->pendingFinalizationRefs);
- if (obj != NULL) {
- *op = WORKER_FINALIZE;
- }
- }
-
- if (obj != NULL) {
/* Don't let the GC collect the object until the
* worker thread is done with it.
*/
@@ -709,6 +689,7 @@
dvmHeapProcessReferences(&gcHeap->softReferences,
spec->softReferencePolicy == CLEAR,
&gcHeap->weakReferences,
+ &gcHeap->finalizerReferences,
&gcHeap->phantomReferences);
#if defined(WITH_JIT)
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
index d10a417..7f8c9c5 100644
--- a/vm/alloc/HeapInternal.h
+++ b/vm/alloc/HeapInternal.h
@@ -29,37 +29,15 @@
struct GcHeap {
HeapSource *heapSource;
- /* List of heap objects that will require finalization when
- * collected. I.e., instance objects
- *
- * a) whose class definitions override java.lang.Object.finalize()
- *
- * *** AND ***
- *
- * b) that have never been finalized.
- *
- * Note that this does not exclude non-garbage objects; this
- * is not the list of pending finalizations, but of objects that
- * potentially have finalization in their futures.
- */
- LargeHeapRefTable *finalizableRefs;
-
- /* The list of objects that need to have finalize() called
- * on themselves. These references are part of the root set.
- *
- * This table is protected by gDvm.heapWorkerListLock, which must
- * be acquired after the heap lock.
- */
- LargeHeapRefTable *pendingFinalizationRefs;
-
/* Linked lists of subclass instances of java/lang/ref/Reference
* that we find while recursing. The "next" pointers are hidden
* in the objects' <code>int Reference.vmData</code> fields.
* These lists are cleared and rebuilt each time the GC runs.
*/
- Object *softReferences;
- Object *weakReferences;
- Object *phantomReferences;
+ Object *softReferences;
+ Object *weakReferences;
+ Object *finalizerReferences;
+ Object *phantomReferences;
/* The list of Reference objects that need to be cleared and/or
* enqueued. The bottom two bits of the object pointers indicate
diff --git a/vm/alloc/HeapWorker.c b/vm/alloc/HeapWorker.c
index 57089f4..e0116d1 100644
--- a/vm/alloc/HeapWorker.c
+++ b/vm/alloc/HeapWorker.c
@@ -272,17 +272,11 @@
static void doHeapWork(Thread *self)
{
Object *obj;
- HeapWorkerOperation op;
- int numFinalizersCalled, numReferencesEnqueued;
+ size_t numReferencesEnqueued;
- assert(gDvm.voffJavaLangObject_finalize >= 0);
assert(gDvm.methJavaLangRefReference_enqueueInternal != NULL);
-
- numFinalizersCalled = 0;
numReferencesEnqueued = 0;
- while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
- Method *method = NULL;
-
+ while ((obj = dvmGetNextHeapWorkerObject()) != NULL) {
/* Make sure the object hasn't been collected since
* being scheduled.
*/
@@ -290,30 +284,18 @@
/* Call the appropriate method(s).
*/
- if (op == WORKER_FINALIZE) {
- numFinalizersCalled++;
- method = obj->clazz->vtable[gDvm.voffJavaLangObject_finalize];
- assert(dvmCompareNameDescriptorAndMethod("finalize", "()V",
- method) == 0);
- assert(method->clazz != gDvm.classJavaLangObject);
- callMethod(self, obj, method);
- } else {
- assert(op == WORKER_ENQUEUE);
- assert(dvmGetFieldObject(
- obj, gDvm.offJavaLangRefReference_queue) != NULL);
- assert(dvmGetFieldObject(
- obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
- numReferencesEnqueued++;
- callMethod(self, obj,
- gDvm.methJavaLangRefReference_enqueueInternal);
- }
+ assert(dvmGetFieldObject(
+ obj, gDvm.offJavaLangRefReference_queue) != NULL);
+ assert(dvmGetFieldObject(
+ obj, gDvm.offJavaLangRefReference_queueNext) == NULL);
+ numReferencesEnqueued++;
+ callMethod(self, obj, gDvm.methJavaLangRefReference_enqueueInternal);
/* Let the GC collect the object.
*/
dvmReleaseTrackedAlloc(obj, self);
}
- LOGV("Called %d finalizers\n", numFinalizersCalled);
- LOGV("Enqueued %d references\n", numReferencesEnqueued);
+ LOGV("Enqueued %zd references", numReferencesEnqueued);
}
/*
@@ -455,54 +437,6 @@
}
/*
- * Block until all pending heap worker work has finished.
- */
-void dvmWaitForHeapWorkerIdle()
-{
- assert(gDvm.heapWorkerReady);
-
- dvmChangeStatus(NULL, THREAD_VMWAIT);
-
- dvmLockMutex(&gDvm.heapWorkerLock);
-
- /* Wake up the heap worker and wait for it to finish. */
- //TODO(http://b/issue?id=699704): This will deadlock if
- // called from finalize(), enqueue(), or clear(). We
- // need to detect when this is called from the HeapWorker
- // context and just give up.
- dvmSignalHeapWorker(false);
- dvmWaitCond(&gDvm.heapWorkerIdleCond, &gDvm.heapWorkerLock);
-
- dvmUnlockMutex(&gDvm.heapWorkerLock);
-
- dvmChangeStatus(NULL, THREAD_RUNNING);
-}
-
-/*
- * Do not return until any pending heap work has finished. This may
- * or may not happen in the context of the calling thread.
- * No exceptions will escape.
- */
-void dvmRunFinalizationSync()
-{
- if (gDvm.zygote) {
- assert(!gDvm.heapWorkerReady);
-
- /* When in zygote mode, there is no heap worker.
- * Do the work in the current thread.
- */
- dvmLockMutex(&gDvm.heapWorkerLock);
- doHeapWork(dvmThreadSelf());
- dvmUnlockMutex(&gDvm.heapWorkerLock);
- } else {
- /* Outside of zygote mode, we can just ask the
- * heap worker thread to do the work.
- */
- dvmWaitForHeapWorkerIdle();
- }
-}
-
-/*
* Requests that dvmHeapSourceTrim() be called no sooner
* than timeoutSec seconds from now. If timeoutSec
* is zero, any pending trim is cancelled.
diff --git a/vm/alloc/HeapWorker.h b/vm/alloc/HeapWorker.h
index 45587ff..67babc3 100644
--- a/vm/alloc/HeapWorker.h
+++ b/vm/alloc/HeapWorker.h
@@ -40,20 +40,6 @@
void dvmSignalHeapWorker(bool shouldLock);
/*
- * Block until all pending heap worker work has finished.
- */
-void dvmWaitForHeapWorkerIdle(void);
-
-/*
- * Does not return until any pending finalizers have been called.
- * This may or may not happen in the context of the calling thread.
- * No exceptions will escape.
- *
- * Used by zygote, which doesn't have a HeapWorker thread.
- */
-void dvmRunFinalizationSync(void);
-
-/*
* Requests that dvmHeapSourceTrim() be called no sooner
* than timeoutSec seconds from now. If timeoutSec
* is zero, any pending trim is cancelled.
@@ -72,14 +58,6 @@
void dvmAssertHeapWorkerThreadRunning();
/*
- * The type of operation for HeapWorker to perform on an object.
- */
-typedef enum HeapWorkerOperation {
- WORKER_FINALIZE = 0,
- WORKER_ENQUEUE = 1,
-} HeapWorkerOperation;
-
-/*
* Called by the worker thread to get the next object
* to finalize/enqueue/clear. Implemented in Heap.c.
*
@@ -87,6 +65,6 @@
* Must be non-NULL.
* @return The object to operate on, or NULL.
*/
-Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op);
+Object *dvmGetNextHeapWorkerObject();
#endif /*_DALVIK_ALLOC_HEAP_WORKER*/
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
index bd872df..e5994e6 100644
--- a/vm/alloc/MarkSweep.c
+++ b/vm/alloc/MarkSweep.c
@@ -508,6 +508,7 @@
{
int flags = CLASS_ISREFERENCE |
CLASS_ISWEAKREFERENCE |
+ CLASS_ISFINALIZERREFERENCE |
CLASS_ISPHANTOMREFERENCE;
return GET_CLASS_FLAG_GROUP(obj->clazz, flags);
}
@@ -529,6 +530,14 @@
}
/*
+ * Returns true if the object derives from FinalizerReference.
+ */
+static bool isFinalizerReference(const Object *obj)
+{
+ return referenceClassFlags(obj) & CLASS_ISFINALIZERREFERENCE;
+}
+
+/*
* Returns true if the object derives from PhantomReference.
*/
static bool isPhantomReference(const Object *obj)
@@ -606,6 +615,8 @@
list = &gcHeap->softReferences;
} else if (isWeakReference(obj)) {
list = &gcHeap->weakReferences;
+ } else if (isFinalizerReference(obj)) {
+ list = &gcHeap->finalizerReferences;
} else if (isPhantomReference(obj)) {
list = &gcHeap->phantomReferences;
}
@@ -872,109 +883,35 @@
assert(*list == NULL);
}
-/* Find unreachable objects that need to be finalized,
- * and schedule them for finalization.
+/*
+ * Enqueues finalizer references with white referents. White
+ * referents are blackened, moved to the pendingNext field, and the
+ * referent field is cleared.
*/
-static void scheduleFinalizations(void)
+static void enqueueFinalizerReferences(Object **list)
{
- ReferenceTable newPendingRefs;
- LargeHeapRefTable *finRefs = gDvm.gcHeap->finalizableRefs;
- Object **ref;
- Object **lastRef;
- size_t totalPendCount;
GcMarkContext *ctx = &gDvm.gcHeap->markContext;
-
- /*
- * All reachable objects have been marked.
- * Any unmarked finalizable objects need to be finalized.
- */
-
- /* Create a table that the new pending refs will
- * be added to.
- */
- if (!dvmHeapInitHeapRefTable(&newPendingRefs)) {
- //TODO: mark all finalizable refs and hope that
- // we can schedule them next time. Watch out,
- // because we may be expecting to free up space
- // by calling finalizers.
- LOGE("scheduleFinalizations(): no room for pending finalizations");
- dvmAbort();
- }
-
- /* Walk through finalizableRefs and move any unmarked references
- * to the list of new pending refs.
- */
- totalPendCount = 0;
- while (finRefs != NULL) {
- Object **gapRef;
- size_t newPendCount = 0;
-
- gapRef = ref = finRefs->refs.table;
- lastRef = finRefs->refs.nextEntry;
- while (ref < lastRef) {
- if (!isMarked(*ref, ctx)) {
- if (!dvmAddToReferenceTable(&newPendingRefs, *ref)) {
- //TODO: add the current table and allocate
- // a new, smaller one.
- LOGE("scheduleFinalizations(): "
- "no room for any more pending finalizations: %zd",
- dvmReferenceTableEntries(&newPendingRefs));
- dvmAbort();
- }
- newPendCount++;
- } else {
- /* This ref is marked, so will remain on finalizableRefs.
- */
- if (newPendCount > 0) {
- /* Copy it up to fill the holes.
- */
- *gapRef++ = *ref;
- } else {
- /* No holes yet; don't bother copying.
- */
- gapRef++;
- }
- }
- ref++;
+ size_t referentOffset = gDvm.offJavaLangRefReference_referent;
+ size_t pendingNextOffset = gDvm.offJavaLangRefReference_pendingNext;
+ bool doSignal = false;
+ while (*list != NULL) {
+ Object *ref = dequeuePendingReference(list);
+ Object *referent = dvmGetFieldObject(ref, referentOffset);
+ if (referent != NULL && !isMarked(referent, ctx)) {
+ markObject(referent, ctx);
+ /* If the referent is non-null the reference must be enqueuable. */
+ assert(isEnqueuable(ref));
+ dvmSetFieldObject(ref, pendingNextOffset, referent);
+ clearReference(ref);
+ enqueueReference(ref);
+ doSignal = true;
}
- finRefs->refs.nextEntry = gapRef;
- //TODO: if the table is empty when we're done, free it.
- totalPendCount += newPendCount;
- finRefs = finRefs->next;
}
- LOGV("scheduleFinalizations(): %zd finalizers triggered.", totalPendCount);
- if (totalPendCount == 0) {
- /* No objects required finalization.
- * Free the empty temporary table.
- */
- dvmClearReferenceTable(&newPendingRefs);
- return;
+ if (doSignal) {
+ processMarkStack(ctx);
+ dvmSignalHeapWorker(false);
}
-
- /* Add the new pending refs to the main list.
- */
- if (!dvmHeapAddTableToLargeTable(&gDvm.gcHeap->pendingFinalizationRefs,
- &newPendingRefs))
- {
- LOGE("scheduleFinalizations(): can't insert new pending finalizations");
- dvmAbort();
- }
-
- //TODO: try compacting the main list with a memcpy loop
-
- /* Mark the refs we just moved; we don't want them or their
- * children to get swept yet.
- */
- ref = newPendingRefs.table;
- lastRef = newPendingRefs.nextEntry;
- assert(ref < lastRef);
- while (ref < lastRef) {
- assert(*ref != NULL);
- markObject(*ref, ctx);
- ref++;
- }
- processMarkStack(ctx);
- dvmSignalHeapWorker(false);
+ assert(*list == NULL);
}
/*
@@ -984,15 +921,14 @@
* This is called when Object.<init> completes normally. It's also
* called for clones of finalizable objects.
*/
-void dvmSetFinalizable(Object* obj)
+void dvmSetFinalizable(Object *obj)
{
- dvmLockHeap();
- GcHeap* gcHeap = gDvm.gcHeap;
- if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs, obj)) {
- LOGE_HEAP("No room for any more finalizable objects");
- dvmAbort();
- }
- dvmUnlockHeap();
+ Thread *self = dvmThreadSelf();
+ assert(self != NULL);
+ Method *meth = gDvm.methJavaLangRefFinalizerReferenceAdd;
+ assert(meth != NULL);
+ JValue unused;
+ dvmCallMethod(self, meth, obj, &unused, obj);
}
/*
@@ -1000,10 +936,12 @@
*/
void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
Object **weakReferences,
+ Object **finalizerReferences,
Object **phantomReferences)
{
assert(softReferences != NULL);
assert(weakReferences != NULL);
+ assert(finalizerReferences != NULL);
assert(phantomReferences != NULL);
/*
* Unless we are in the zygote or required to clear soft
@@ -1023,7 +961,7 @@
* Preserve all white objects with finalize methods and schedule
* them for finalization.
*/
- scheduleFinalizations();
+ enqueueFinalizerReferences(finalizerReferences);
/*
* Clear all f-reachable soft and weak references with white
* referents.
@@ -1039,6 +977,7 @@
*/
assert(*softReferences == NULL);
assert(*weakReferences == NULL);
+ assert(*finalizerReferences == NULL);
assert(*phantomReferences == NULL);
}
diff --git a/vm/alloc/MarkSweep.h b/vm/alloc/MarkSweep.h
index 0672aa8..c9f11e4 100644
--- a/vm/alloc/MarkSweep.h
+++ b/vm/alloc/MarkSweep.h
@@ -53,6 +53,7 @@
void dvmHeapReScanMarkedObjects(void);
void dvmHeapProcessReferences(Object **softReferences, bool clearSoftRefs,
Object **weakReferences,
+ Object **finalizerReferences,
Object **phantomReferences);
void dvmHeapFinishMarkStep(void);
void dvmHeapSweepSystemWeaks(void);
diff --git a/vm/alloc/Visit.c b/vm/alloc/Visit.c
index 1961fc9..e11d583 100644
--- a/vm/alloc/Visit.c
+++ b/vm/alloc/Visit.c
@@ -251,7 +251,6 @@
visitReferenceTable(visitor, &gDvm.jniPinRefTable, 0, ROOT_VM_INTERNAL, arg);
dvmUnlockMutex(&gDvm.jniPinRefLock);
visitLargeHeapRefTable(visitor, gDvm.gcHeap->referenceOperations, ROOT_REFERENCE_CLEANUP, arg);
- visitLargeHeapRefTable(visitor, gDvm.gcHeap->pendingFinalizationRefs, ROOT_FINALIZING, arg);
visitThreads(visitor, arg);
(*visitor)(&gDvm.outOfMemoryObj, 0, ROOT_VM_INTERNAL, arg);
(*visitor)(&gDvm.internalErrorObj, 0, ROOT_VM_INTERNAL, arg);
diff --git a/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S
index 67a3cc7..6e694d1 100644
--- a/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S
+++ b/vm/mterp/armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -14,6 +14,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(${cccc}+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
diff --git a/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c
index da0d762..4d2c50c 100644
--- a/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c
+++ b/vm/mterp/c/OP_INVOKE_OBJECT_INIT_RANGE.c
@@ -14,6 +14,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 37bfa62..58f7216 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -7421,6 +7421,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
@@ -10480,6 +10481,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(4+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index f4a36d5..b09793b 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -7743,6 +7743,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
@@ -10802,6 +10803,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(4+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index e04621c..d5543a9 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -7379,6 +7379,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
@@ -10434,6 +10435,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(4+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 7976464..fc3528a 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -7379,6 +7379,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
@@ -10434,6 +10435,7 @@
ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
beq 1f @ nope, done
+ EXPORT_PC() @ can throw
bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
1: FETCH_ADVANCE_INST(4+1) @ advance to next instr, load rINST
GET_INST_OPCODE(ip) @ ip<- opcode from rINST
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
index 354cbd2..2768ba8 100644
--- a/vm/mterp/out/InterpC-allstubs.c
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -3064,6 +3064,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c
index a77cf9e..348ca5d 100644
--- a/vm/mterp/out/InterpC-portdbg.c
+++ b/vm/mterp/out/InterpC-portdbg.c
@@ -3427,6 +3427,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c
index 33a7f31..5c3e525 100644
--- a/vm/mterp/out/InterpC-portstd.c
+++ b/vm/mterp/out/InterpC-portstd.c
@@ -3177,6 +3177,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c
index 7d9c1af..77ee30f 100644
--- a/vm/mterp/out/InterpC-x86-atom.c
+++ b/vm/mterp/out/InterpC-x86-atom.c
@@ -1423,6 +1423,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
index 5152e85..7ae06e3 100644
--- a/vm/mterp/out/InterpC-x86.c
+++ b/vm/mterp/out/InterpC-x86.c
@@ -1372,6 +1372,7 @@
* (by virtue of being nothing but a return-void) and set it now.
*/
if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
+ EXPORT_PC();
dvmSetFinalizable(obj);
}
diff --git a/vm/native/dalvik_system_VMRuntime.c b/vm/native/dalvik_system_VMRuntime.c
index af0df7a..3642f90 100644
--- a/vm/native/dalvik_system_VMRuntime.c
+++ b/vm/native/dalvik_system_VMRuntime.c
@@ -55,23 +55,6 @@
}
/*
- * public native void runFinalizationSync()
- *
- * Does not return until any pending finalizers have been called.
- * This may or may not happen in the context of the calling thread.
- * No exceptions will escape.
- *
- * Used by zygote, which doesn't have a HeapWorker thread.
- */
-static void Dalvik_dalvik_system_VMRuntime_runFinalizationSync(const u4* args,
- JValue* pResult)
-{
- dvmRunFinalizationSync();
-
- RETURN_VOID();
-}
-
-/*
* public native void startJitCompilation()
*
* Callback function from the framework to indicate that an app has gone
@@ -216,8 +199,6 @@
Dalvik_dalvik_system_VMRuntime_newNonMovableArray },
{ "properties", "()[Ljava/lang/String;",
Dalvik_dalvik_system_VMRuntime_properties },
- { "runFinalizationSync", "()V",
- Dalvik_dalvik_system_VMRuntime_runFinalizationSync },
{ "startJitCompilation", "()V",
Dalvik_dalvik_system_VMRuntime_startJitCompilation },
{ "vmVersion", "()Ljava/lang/String;",
diff --git a/vm/native/dalvik_system_Zygote.c b/vm/native/dalvik_system_Zygote.c
index 905eb9a..6f7e6fa 100644
--- a/vm/native/dalvik_system_Zygote.c
+++ b/vm/native/dalvik_system_Zygote.c
@@ -513,11 +513,11 @@
}
const DalvikNativeMethod dvm_dalvik_system_Zygote[] = {
- { "fork", "()I",
- Dalvik_dalvik_system_Zygote_fork },
- { "forkAndSpecialize", "(II[II[[I)I",
- Dalvik_dalvik_system_Zygote_forkAndSpecialize },
- { "forkSystemServer", "(II[II[[IJJ)I",
- Dalvik_dalvik_system_Zygote_forkSystemServer },
+ { "nativeFork", "()I",
+ Dalvik_dalvik_system_Zygote_fork },
+ { "nativeForkAndSpecialize", "(II[II[[I)I",
+ Dalvik_dalvik_system_Zygote_forkAndSpecialize },
+ { "nativeForkSystemServer", "(II[II[[IJJ)I",
+ Dalvik_dalvik_system_Zygote_forkSystemServer },
{ NULL, NULL, NULL },
};
diff --git a/vm/native/java_lang_Runtime.c b/vm/native/java_lang_Runtime.c
index b5c6a33..112448c 100644
--- a/vm/native/java_lang_Runtime.c
+++ b/vm/native/java_lang_Runtime.c
@@ -93,31 +93,6 @@
}
/*
- * public void runFinalization(boolean forced)
- *
- * Requests that the VM runs finalizers for objects on the heap. If the
- * parameter forced is true, then the VM needs to ensure finalization.
- * Otherwise this only inspires the VM to make a best-effort attempt to
- * run finalizers before returning, but it's not guaranteed to actually
- * do anything.
- */
-static void Dalvik_java_lang_Runtime_runFinalization(const u4* args,
- JValue* pResult)
-{
- bool forced = (args[0] != 0);
-
- dvmWaitForHeapWorkerIdle();
- if (forced) {
- // TODO(Google) Need to explicitly implement this,
- // although dvmWaitForHeapWorkerIdle()
- // should usually provide the "forced"
- // behavior already.
- }
-
- RETURN_VOID();
-}
-
-/*
* public long maxMemory()
*
* Returns GC heap max memory in bytes.
@@ -166,8 +141,6 @@
Dalvik_java_lang_Runtime_nativeExit },
{ "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/String;",
Dalvik_java_lang_Runtime_nativeLoad },
- { "runFinalization", "(Z)V",
- Dalvik_java_lang_Runtime_runFinalization },
{ "totalMemory", "()J",
Dalvik_java_lang_Runtime_totalMemory },
{ NULL, NULL, NULL },
diff --git a/vm/oo/Class.c b/vm/oo/Class.c
index 54f63ee..a3f1d1c 100644
--- a/vm/oo/Class.c
+++ b/vm/oo/Class.c
@@ -2704,6 +2704,7 @@
superRefFlags = GET_CLASS_FLAG_GROUP(clazz->super,
CLASS_ISREFERENCE |
CLASS_ISWEAKREFERENCE |
+ CLASS_ISFINALIZERREFERENCE |
CLASS_ISPHANTOMREFERENCE);
SET_CLASS_FLAG(clazz, superRefFlags);
} else if (clazz->classLoader == NULL &&
@@ -2727,6 +2728,10 @@
{
refFlags |= CLASS_ISWEAKREFERENCE;
} else if (strcmp(clazz->descriptor,
+ "Ljava/lang/ref/FinalizerReference;") == 0)
+ {
+ refFlags |= CLASS_ISFINALIZERREFERENCE;
+ } else if (strcmp(clazz->descriptor,
"Ljava/lang/ref/PhantomReference;") == 0)
{
refFlags |= CLASS_ISPHANTOMREFERENCE;
@@ -2744,6 +2749,7 @@
assert(GET_CLASS_FLAG_GROUP(clazz,
CLASS_ISREFERENCE |
CLASS_ISWEAKREFERENCE |
+ CLASS_ISFINALIZERREFERENCE |
CLASS_ISPHANTOMREFERENCE) == 0);
SET_CLASS_FLAG(clazz, refFlags);
diff --git a/vm/oo/Object.h b/vm/oo/Object.h
index 7f2fbf6..b32d5b9 100644
--- a/vm/oo/Object.h
+++ b/vm/oo/Object.h
@@ -80,12 +80,14 @@
CLASS_ISFINALIZABLE = (1<<31), // class/ancestor overrides finalize()
CLASS_ISARRAY = (1<<30), // class is a "[*"
CLASS_ISOBJECTARRAY = (1<<29), // class is a "[L*" or "[[*"
+
CLASS_ISREFERENCE = (1<<28), // class is a soft/weak/phantom ref
// only ISREFERENCE is set --> soft
CLASS_ISWEAKREFERENCE = (1<<27), // class is a weak reference
- CLASS_ISPHANTOMREFERENCE = (1<<26), // class is a phantom reference
+ CLASS_ISFINALIZERREFERENCE = (1<<26), // class is a finalizer reference
+ CLASS_ISPHANTOMREFERENCE = (1<<25), // class is a phantom reference
- CLASS_MULTIPLE_DEFS = (1<<25), // DEX verifier: defs in multiple DEXs
+ CLASS_MULTIPLE_DEFS = (1<<24), // DEX verifier: defs in multiple DEXs
/* unlike the others, these can be present in the optimized DEX file */
CLASS_ISOPTIMIZED = (1<<17), // class may contain opt instrs