Refactor reference queues.

Refactored reference queue processing to reside in the heap code.
This removes significant code duplication between the semispace and
marksweep garbage collectors.

Changed the soft reference behaviour to preserve all soft references
unless the GC needs to clear them to avoid an out-of-memory error. A
better heuristic, such as preserving soft references in LRU order,
may be worth investigating in the future.

Change-Id: I1f3ff5bd4b3c5149271f4bb4fc94ba199e2f9bc2
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 97cbdd9..60683d0 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -53,6 +53,7 @@
 	gc/collector/semi_space.cc \
 	gc/collector/sticky_mark_sweep.cc \
 	gc/heap.cc \
+	gc/reference_queue.cc \
 	gc/space/bump_pointer_space.cc \
 	gc/space/dlmalloc_space.cc \
 	gc/space/image_space.cc \
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fc41c7f..61b3f09 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -187,8 +187,8 @@
 void MarkSweep::ProcessReferences(Thread* self) {
   TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
-                    &finalizer_reference_list_, &phantom_reference_list_);
+  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
+                               &RecursiveMarkObjectCallback, this);
 }
 
 bool MarkSweep::HandleDirtyObjectsPhase() {
@@ -407,6 +407,13 @@
   }
 }
 
+mirror::Object* MarkSweep::RecursiveMarkObjectCallback(mirror::Object* obj, void* arg) {
+  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+  mark_sweep->MarkObject(obj);
+  mark_sweep->ProcessMarkStack(true);
+  return obj;
+}
+
 inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
   DCHECK(!IsImmune(obj));
   // Try to take advantage of locality of references within a space, failing this find the space
@@ -992,7 +999,7 @@
   ProcessMarkStack(false);
 }
 
-mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+mirror::Object* MarkSweep::IsMarkedCallback(Object* object, void* arg) {
   if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
     return object;
   }
@@ -1013,7 +1020,7 @@
 void MarkSweep::SweepSystemWeaks() {
   Runtime* runtime = Runtime::Current();
   timings_.StartSplit("SweepSystemWeaks");
-  runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+  runtime->SweepSystemWeaks(IsMarkedCallback, this);
   timings_.EndSplit();
 }
 
@@ -1314,40 +1321,7 @@
   DCHECK(klass != nullptr);
   DCHECK(klass->IsReferenceClass());
   DCHECK(obj != NULL);
-  Object* referent = heap_->GetReferenceReferent(obj);
-  if (referent != NULL && !IsMarked(referent)) {
-    if (kCountJavaLangRefs) {
-      ++reference_count_;
-    }
-    Thread* self = Thread::Current();
-    // TODO: Remove these locks, and use atomic stacks for storing references?
-    // We need to check that the references haven't already been enqueued since we can end up
-    // scanning the same reference multiple times due to dirty cards.
-    if (klass->IsSoftReferenceClass()) {
-      MutexLock mu(self, *heap_->GetSoftRefQueueLock());
-      if (!heap_->IsEnqueued(obj)) {
-        heap_->EnqueuePendingReference(obj, &soft_reference_list_);
-      }
-    } else if (klass->IsWeakReferenceClass()) {
-      MutexLock mu(self, *heap_->GetWeakRefQueueLock());
-      if (!heap_->IsEnqueued(obj)) {
-        heap_->EnqueuePendingReference(obj, &weak_reference_list_);
-      }
-    } else if (klass->IsFinalizerReferenceClass()) {
-      MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
-      if (!heap_->IsEnqueued(obj)) {
-        heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
-      }
-    } else if (klass->IsPhantomReferenceClass()) {
-      MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
-      if (!heap_->IsEnqueued(obj)) {
-        heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
-      }
-    } else {
-      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
-                 << " " << std::hex << klass->GetAccessFlags();
-    }
-  }
+  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
 }
 
 class MarkObjectVisitor {
@@ -1435,43 +1409,6 @@
   timings_.EndSplit();
 }
 
-// Walks the reference list marking any references subject to the
-// reference clearing policy.  References with a black referent are
-// removed from the list.  References with white referents biased
-// toward saving are blackened and also removed from the list.
-void MarkSweep::PreserveSomeSoftReferences(Object** list) {
-  DCHECK(list != NULL);
-  Object* clear = NULL;
-  size_t counter = 0;
-
-  DCHECK(mark_stack_->IsEmpty());
-
-  timings_.StartSplit("PreserveSomeSoftReferences");
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent == NULL) {
-      // Referent was cleared by the user during marking.
-      continue;
-    }
-    bool is_marked = IsMarked(referent);
-    if (!is_marked && ((++counter) & 1)) {
-      // Referent is white and biased toward saving, mark it.
-      MarkObject(referent);
-      is_marked = true;
-    }
-    if (!is_marked) {
-      // Referent is white, queue it for clearing.
-      heap_->EnqueuePendingReference(ref, &clear);
-    }
-  }
-  *list = clear;
-  timings_.EndSplit();
-
-  // Restart the mark with the newly black references added to the root set.
-  ProcessMarkStack(true);
-}
-
 inline bool MarkSweep::IsMarked(const Object* object) const
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
   if (IsImmune(object)) {
@@ -1484,98 +1421,6 @@
   return heap_->GetMarkBitmap()->Test(object);
 }
 
-// Unlink the reference list clearing references objects with white
-// referents.  Cleared references registered to a reference queue are
-// scheduled for appending by the heap worker thread.
-void MarkSweep::ClearWhiteReferences(Object** list) {
-  DCHECK(list != NULL);
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent != NULL && !IsMarked(referent)) {
-      // Referent is white, clear it.
-      heap_->ClearReferenceReferent(ref);
-      if (heap_->IsEnqueuable(ref)) {
-        heap_->EnqueueReference(ref, &cleared_reference_list_);
-      }
-    }
-  }
-  DCHECK(*list == NULL);
-}
-
-// Enqueues finalizer references with white referents.  White
-// referents are blackened, moved to the zombie field, and the
-// referent field is cleared.
-void MarkSweep::EnqueueFinalizerReferences(Object** list) {
-  DCHECK(list != NULL);
-  timings_.StartSplit("EnqueueFinalizerReferences");
-  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
-  bool has_enqueued = false;
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent != NULL && !IsMarked(referent)) {
-      MarkObject(referent);
-      // If the referent is non-null the reference must queuable.
-      DCHECK(heap_->IsEnqueuable(ref));
-      ref->SetFieldObject(zombie_offset, referent, false);
-      heap_->ClearReferenceReferent(ref);
-      heap_->EnqueueReference(ref, &cleared_reference_list_);
-      has_enqueued = true;
-    }
-  }
-  timings_.EndSplit();
-  if (has_enqueued) {
-    ProcessMarkStack(true);
-  }
-  DCHECK(*list == NULL);
-}
-
-// Process reference class instances and schedule finalizations.
-void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
-                                  Object** weak_references,
-                                  Object** finalizer_references,
-                                  Object** phantom_references) {
-  CHECK(soft_references != NULL);
-  CHECK(weak_references != NULL);
-  CHECK(finalizer_references != NULL);
-  CHECK(phantom_references != NULL);
-  CHECK(mark_stack_->IsEmpty());
-
-  // Unless we are in the zygote or required to clear soft references
-  // with white references, preserve some white referents.
-  if (!clear_soft && !Runtime::Current()->IsZygote()) {
-    PreserveSomeSoftReferences(soft_references);
-  }
-
-  timings_.StartSplit("ProcessReferences");
-  // Clear all remaining soft and weak references with white
-  // referents.
-  ClearWhiteReferences(soft_references);
-  ClearWhiteReferences(weak_references);
-  timings_.EndSplit();
-
-  // Preserve all white objects with finalize methods and schedule
-  // them for finalization.
-  EnqueueFinalizerReferences(finalizer_references);
-
-  timings_.StartSplit("ProcessReferences");
-  // Clear all f-reachable soft and weak references with white
-  // referents.
-  ClearWhiteReferences(soft_references);
-  ClearWhiteReferences(weak_references);
-
-  // Clear all phantom references with white referents.
-  ClearWhiteReferences(phantom_references);
-
-  // At this point all reference lists should be empty.
-  DCHECK(*soft_references == NULL);
-  DCHECK(*weak_references == NULL);
-  DCHECK(*finalizer_references == NULL);
-  DCHECK(*phantom_references == NULL);
-  timings_.EndSplit();
-}
-
 void MarkSweep::UnBindBitmaps() {
   TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -1596,11 +1441,7 @@
 void MarkSweep::FinishPhase() {
   TimingLogger::ScopedSplit split("FinishPhase", &timings_);
   // Can't enqueue references if we hold the mutator lock.
-  Object* cleared_references = GetClearedReferences();
   Heap* heap = GetHeap();
-  timings_.NewSplit("EnqueueClearedReferences");
-  heap->EnqueueClearedReferences(&cleared_references);
-
   timings_.NewSplit("PostGcVerification");
   heap->PostGcVerification(this);
 
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index cc58412..53d85b0 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -189,6 +189,10 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
+  static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
   static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -212,10 +216,7 @@
   // Returns true if the object has its bit set in the mark bitmap.
   bool IsMarked(const mirror::Object* object) const;
 
-  static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static mirror::Object* SystemWeakIsMarkedArrayCallback(mirror::Object* object, void* arg)
+  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
@@ -349,13 +350,6 @@
   void ClearWhiteReferences(mirror::Object** list)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
-                         mirror::Object** weak_references,
-                         mirror::Object** finalizer_references,
-                         mirror::Object** phantom_references)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Whether or not we count how many of each type of object were scanned.
   static const bool kCountScannedTypes = false;
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index aafe7a9..ba98314 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -158,8 +158,8 @@
 void SemiSpace::ProcessReferences(Thread* self) {
   TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
-                    &finalizer_reference_list_, &phantom_reference_list_);
+  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
+                               &RecursiveMarkObjectCallback, this);
 }
 
 void SemiSpace::MarkingPhase() {
@@ -344,6 +344,15 @@
   return ret;
 }
 
+Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
+  DCHECK(root != nullptr);
+  DCHECK(arg != nullptr);
+  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
+  mirror::Object* ret = semi_space->MarkObject(root);
+  semi_space->ProcessMarkStack(true);
+  return ret;
+}
+
 Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
   DCHECK(root != nullptr);
   DCHECK(arg != nullptr);
@@ -374,13 +383,13 @@
   return obj;
 }
 
-mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
   return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
 }
 
 void SemiSpace::SweepSystemWeaks() {
   timings_.StartSplit("SweepSystemWeaks");
-  Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
   timings_.EndSplit();
 }
 
@@ -487,45 +496,7 @@
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
-  DCHECK(klass != nullptr);
-  DCHECK(klass->IsReferenceClass());
-  DCHECK(obj != nullptr);
-  Object* referent = heap_->GetReferenceReferent(obj);
-  if (referent != nullptr) {
-    Object* forward_address = GetMarkedForwardAddress(referent);
-    if (forward_address == nullptr) {
-      Thread* self = Thread::Current();
-      // TODO: Remove these locks, and use atomic stacks for storing references?
-      // We need to check that the references haven't already been enqueued since we can end up
-      // scanning the same reference multiple times due to dirty cards.
-      if (klass->IsSoftReferenceClass()) {
-        MutexLock mu(self, *heap_->GetSoftRefQueueLock());
-        if (!heap_->IsEnqueued(obj)) {
-          heap_->EnqueuePendingReference(obj, &soft_reference_list_);
-        }
-      } else if (klass->IsWeakReferenceClass()) {
-        MutexLock mu(self, *heap_->GetWeakRefQueueLock());
-        if (!heap_->IsEnqueued(obj)) {
-          heap_->EnqueuePendingReference(obj, &weak_reference_list_);
-        }
-      } else if (klass->IsFinalizerReferenceClass()) {
-        MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
-        if (!heap_->IsEnqueued(obj)) {
-          heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
-        }
-      } else if (klass->IsPhantomReferenceClass()) {
-        MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
-        if (!heap_->IsEnqueued(obj)) {
-          heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
-        }
-      } else {
-        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
-                   << klass->GetAccessFlags();
-      }
-    } else if (referent != forward_address) {
-      heap_->SetReferenceReferent(obj, forward_address);
-    }
-  }
+  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
 }
 
 // Visit all of the references of an object and update.
@@ -555,48 +526,6 @@
   timings_.EndSplit();
 }
 
-// Walks the reference list marking any references subject to the
-// reference clearing policy.  References with a black referent are
-// removed from the list.  References with white referents biased
-// toward saving are blackened and also removed from the list.
-void SemiSpace::PreserveSomeSoftReferences(Object** list) {
-  DCHECK(list != NULL);
-  Object* clear = NULL;
-  size_t counter = 0;
-  DCHECK(mark_stack_->IsEmpty());
-  timings_.StartSplit("PreserveSomeSoftReferences");
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent == NULL) {
-      // Referent was cleared by the user during marking.
-      continue;
-    }
-    Object* forward_address = GetMarkedForwardAddress(referent);
-    bool is_marked = forward_address != nullptr;
-    if (!is_marked && ((++counter) & 1)) {
-      // Referent is white and biased toward saving, mark it.
-      forward_address = MarkObject(referent);
-      if (referent != forward_address) {
-        // Update the referent if we moved it.
-        heap_->SetReferenceReferent(ref, forward_address);
-      }
-    } else {
-      if (!is_marked) {
-        // Referent is white, queue it for clearing.
-        heap_->EnqueuePendingReference(ref, &clear);
-      } else if (referent != forward_address) {
-        CHECK(forward_address != nullptr);
-        heap_->SetReferenceReferent(ref, forward_address);
-      }
-    }
-  }
-  *list = clear;
-  timings_.EndSplit();
-  // Restart the mark with the newly black references added to the root set.
-  ProcessMarkStack(true);
-}
-
 inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
   // All immune objects are assumed marked.
@@ -618,112 +547,6 @@
   return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
 }
 
-// Unlink the reference list clearing references objects with white
-// referents.  Cleared references registered to a reference queue are
-// scheduled for appending by the heap worker thread.
-void SemiSpace::ClearWhiteReferences(Object** list) {
-  DCHECK(list != NULL);
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent != nullptr) {
-      Object* forward_address = GetMarkedForwardAddress(referent);
-      if (forward_address == nullptr) {
-        // Referent is white, clear it.
-        heap_->ClearReferenceReferent(ref);
-        if (heap_->IsEnqueuable(ref)) {
-          heap_->EnqueueReference(ref, &cleared_reference_list_);
-        }
-      } else if (referent != forward_address) {
-        heap_->SetReferenceReferent(ref, forward_address);
-      }
-    }
-  }
-  DCHECK(*list == NULL);
-}
-
-// Enqueues finalizer references with white referents.  White
-// referents are blackened, moved to the zombie field, and the
-// referent field is cleared.
-void SemiSpace::EnqueueFinalizerReferences(Object** list) {
-  // *list = NULL;
-  // return;
-  DCHECK(list != NULL);
-  timings_.StartSplit("EnqueueFinalizerReferences");
-  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
-  bool has_enqueued = false;
-  while (*list != NULL) {
-    Object* ref = heap_->DequeuePendingReference(list);
-    Object* referent = heap_->GetReferenceReferent(ref);
-    if (referent != nullptr) {
-      Object* forward_address = GetMarkedForwardAddress(referent);
-      // Not marked.
-      if (forward_address == nullptr) {
-        forward_address = MarkObject(referent);
-        // If the referent is non-null the reference must queuable.
-        DCHECK(heap_->IsEnqueuable(ref));
-        // Move the referent to the zombie field.
-        ref->SetFieldObject(zombie_offset, forward_address, false);
-        heap_->ClearReferenceReferent(ref);
-        heap_->EnqueueReference(ref, &cleared_reference_list_);
-        has_enqueued = true;
-      } else if (referent != forward_address) {
-        heap_->SetReferenceReferent(ref, forward_address);
-      }
-    }
-  }
-  timings_.EndSplit();
-  if (has_enqueued) {
-    ProcessMarkStack(true);
-  }
-  DCHECK(*list == NULL);
-}
-
-// Process reference class instances and schedule finalizations.
-void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
-                                  Object** weak_references,
-                                  Object** finalizer_references,
-                                  Object** phantom_references) {
-  CHECK(soft_references != NULL);
-  CHECK(weak_references != NULL);
-  CHECK(finalizer_references != NULL);
-  CHECK(phantom_references != NULL);
-  CHECK(mark_stack_->IsEmpty());
-
-  // Unless we are in the zygote or required to clear soft references
-  // with white references, preserve some white referents.
-  if (!clear_soft && !Runtime::Current()->IsZygote()) {
-    PreserveSomeSoftReferences(soft_references);
-  }
-
-  timings_.StartSplit("ProcessReferences");
-  // Clear all remaining soft and weak references with white
-  // referents.
-  ClearWhiteReferences(soft_references);
-  ClearWhiteReferences(weak_references);
-  timings_.EndSplit();
-
-  // Preserve all white objects with finalize methods and schedule
-  // them for finalization.
-  EnqueueFinalizerReferences(finalizer_references);
-
-  timings_.StartSplit("ProcessReferences");
-  // Clear all f-reachable soft and weak references with white
-  // referents.
-  ClearWhiteReferences(soft_references);
-  ClearWhiteReferences(weak_references);
-
-  // Clear all phantom references with white referents.
-  ClearWhiteReferences(phantom_references);
-
-  // At this point all reference lists should be empty.
-  DCHECK(*soft_references == NULL);
-  DCHECK(*weak_references == NULL);
-  DCHECK(*finalizer_references == NULL);
-  DCHECK(*phantom_references == NULL);
-  timings_.EndSplit();
-}
-
 void SemiSpace::UnBindBitmaps() {
   TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -751,11 +574,7 @@
 void SemiSpace::FinishPhase() {
   TimingLogger::ScopedSplit split("FinishPhase", &timings_);
   // Can't enqueue references if we hold the mutator lock.
-  Object* cleared_references = GetClearedReferences();
   Heap* heap = GetHeap();
-  timings_.NewSplit("EnqueueClearedReferences");
-  heap->EnqueueClearedReferences(&cleared_references);
-
   timings_.NewSplit("PostGcVerification");
   heap->PostGcVerification(this);
 
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 13d5195..0f0cae1 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -150,12 +150,15 @@
   static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
+  static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non movable things).
   mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
 
-  static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c209479..f446fcf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -82,10 +82,11 @@
       long_gc_log_threshold_(long_gc_log_threshold),
       ignore_max_footprint_(ignore_max_footprint),
       have_zygote_space_(false),
-      soft_ref_queue_lock_(NULL),
-      weak_ref_queue_lock_(NULL),
-      finalizer_ref_queue_lock_(NULL),
-      phantom_ref_queue_lock_(NULL),
+      soft_reference_queue_(this),
+      weak_reference_queue_(this),
+      finalizer_reference_queue_(this),
+      phantom_reference_queue_(this),
+      cleared_references_(this),
       is_gc_running_(false),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -233,13 +234,6 @@
   gc_complete_lock_ = new Mutex("GC complete lock");
   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                 *gc_complete_lock_));
-
-  // Create the reference queue locks, this is required so for parallel object scanning in the GC.
-  soft_ref_queue_lock_ = new Mutex("Soft reference queue lock");
-  weak_ref_queue_lock_ = new Mutex("Weak reference queue lock");
-  finalizer_ref_queue_lock_ = new Mutex("Finalizer reference queue lock");
-  phantom_ref_queue_lock_ = new Mutex("Phantom reference queue lock");
-
   last_gc_time_ns_ = NanoTime();
   last_gc_size_ = GetBytesAllocated();
 
@@ -599,10 +593,6 @@
   STLDeleteElements(&continuous_spaces_);
   STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
-  delete soft_ref_queue_lock_;
-  delete weak_ref_queue_lock_;
-  delete finalizer_ref_queue_lock_;
-  delete phantom_ref_queue_lock_;
   VLOG(heap) << "Finished ~Heap()";
 }
 
@@ -640,6 +630,108 @@
   return FindDiscontinuousSpaceFromObject(obj, true);
 }
 
+struct SoftReferenceArgs {
+  RootVisitor* is_marked_callback_;
+  RootVisitor* recursive_mark_callback_;
+  void* arg_;
+};
+
+mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
+  // TODO: Don't preserve all soft references; investigate a better heuristic such as LRU order.
+  return args->recursive_mark_callback_(obj, args->arg_);
+}
+
+// Process reference class instances and schedule finalizations.
+void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
+                             RootVisitor* is_marked_callback,
+                             RootVisitor* recursive_mark_object_callback, void* arg) {
+  // Unless we are in the zygote or required to clear soft references with white references,
+  // preserve some white referents.
+  if (!clear_soft && !Runtime::Current()->IsZygote()) {
+    SoftReferenceArgs soft_reference_args;
+    soft_reference_args.is_marked_callback_ = is_marked_callback;
+    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
+    soft_reference_args.arg_ = arg;
+    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
+                                                     &soft_reference_args);
+  }
+  timings.StartSplit("ProcessReferences");
+  // Clear all remaining soft and weak references with white referents.
+  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  timings.EndSplit();
+  // Preserve all white objects with finalize methods and schedule them for finalization.
+  timings.StartSplit("EnqueueFinalizerReferences");
+  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+                                                        recursive_mark_object_callback, arg);
+  timings.EndSplit();
+  timings.StartSplit("ProcessReferences");
+  // Clear all f-reachable soft and weak references with white referents.
+  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  // Clear all phantom references with white referents.
+  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  // At this point all reference queues other than the cleared references should be empty.
+  DCHECK(soft_reference_queue_.IsEmpty());
+  DCHECK(weak_reference_queue_.IsEmpty());
+  DCHECK(finalizer_reference_queue_.IsEmpty());
+  DCHECK(phantom_reference_queue_.IsEmpty());
+  timings.EndSplit();
+}
+
+bool Heap::IsEnqueued(mirror::Object* ref) const {
+  // Since the references are stored in cyclic lists, the pendingNext field of an enqueued
+  // reference is always non-null.
+  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
+}
+
+bool Heap::IsEnqueuable(const mirror::Object* ref) const {
+  DCHECK(ref != nullptr);
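+  // A reference is enqueuable if it has an associated queue (queue != null) that it has not yet
+  // been added to (queueNext == null).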
+  const mirror::Object* queue =
+      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
+  const mirror::Object* queue_next =
+      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
+  return queue != nullptr && queue_next == nullptr;
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+                                  RootVisitor mark_visitor, void* arg) {
+  DCHECK(klass != nullptr);
+  DCHECK(klass->IsReferenceClass());
+  DCHECK(obj != nullptr);
+  mirror::Object* referent = GetReferenceReferent(obj);
+  if (referent != nullptr) {
+    mirror::Object* forward_address = mark_visitor(referent, arg);
+    // Null means that the object is not currently marked.
+    if (forward_address == nullptr) {
+      Thread* self = Thread::Current();
+      // TODO: Remove these locks, and use atomic stacks for storing references?
+      // We need to check that the references haven't already been enqueued since we can end up
+      // scanning the same reference multiple times due to dirty cards.
+      if (klass->IsSoftReferenceClass()) {
+        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+      } else if (klass->IsWeakReferenceClass()) {
+        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+      } else if (klass->IsFinalizerReferenceClass()) {
+        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+      } else if (klass->IsPhantomReferenceClass()) {
+        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+      } else {
+        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+                   << klass->GetAccessFlags();
+      }
+    } else if (referent != forward_address) {
+      // Referent is already marked and we need to update it.
+      SetReferenceReferent(obj, forward_address);
+    }
+  }
+}
+
 space::ImageSpace* Heap::GetImageSpace() const {
   for (const auto& space : continuous_spaces_) {
     if (space->IsImageSpace()) {
@@ -843,7 +933,6 @@
     if (i > 0) {
       NanoSleep(MsToNs(10));
     }
-
     if (search_allocation_stack) {
       if (sorted) {
         if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
@@ -1442,6 +1531,10 @@
   total_objects_freed_ever_ += collector->GetFreedObjects();
   total_bytes_freed_ever_ += collector->GetFreedBytes();
 
+  // Enqueue cleared references.
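+  // This passes the cleared references to java.lang.ref.ReferenceQueue.add on the managed heap.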
+  EnqueueClearedReferences();
+
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(gc_type, collector->GetDurationNs());
 
@@ -2072,72 +2164,6 @@
   return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
 }
 
-void Heap::ClearReferenceReferent(mirror::Object* reference) {
-  DCHECK(reference != NULL);
-  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  reference->SetFieldObject(reference_referent_offset_, nullptr, true);
-}
-
-// Returns true if the reference object has not yet been enqueued.
-bool Heap::IsEnqueuable(const mirror::Object* ref) {
-  DCHECK(ref != NULL);
-  const mirror::Object* queue =
-      ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
-  const mirror::Object* queue_next =
-      ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
-  return (queue != NULL) && (queue_next == NULL);
-}
-
-void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
-  DCHECK(ref != NULL);
-  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
-  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
-  EnqueuePendingReference(ref, cleared_reference_list);
-}
-
-bool Heap::IsEnqueued(mirror::Object* ref) {
-  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
-  // will always be non-null.
-  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
-}
-
-void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
-  DCHECK(ref != NULL);
-  DCHECK(list != NULL);
-  if (*list == NULL) {
-    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
-    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
-    *list = ref;
-  } else {
-    mirror::Object* head =
-        (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
-    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
-    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
-  }
-}
-
-mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
-  DCHECK(list != NULL);
-  DCHECK(*list != NULL);
-  mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
-                                                                  false);
-  mirror::Object* ref;
-
-  // Note: the following code is thread-safe because it is only called from ProcessReferences which
-  // is single threaded.
-  if (*list == head) {
-    ref = *list;
-    *list = NULL;
-  } else {
-    mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
-                                                                 false);
-    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
-    ref = head;
-  }
-  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
-  return ref;
-}
-
 void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
   ScopedObjectAccess soa(self);
   JValue result;
@@ -2168,20 +2194,18 @@
   }
 }
 
-void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
-  DCHECK(cleared != nullptr);
-  mirror::Object* list = *cleared;
-  if (list != nullptr) {
+void Heap::EnqueueClearedReferences() {
+  if (!cleared_references_.IsEmpty()) {
     // When a runtime isn't started there are no reference queues to care about so ignore.
     if (LIKELY(Runtime::Current()->IsStarted())) {
       ScopedObjectAccess soa(Thread::Current());
       JValue result;
       ArgArray arg_array(NULL, 0);
-      arg_array.Append(reinterpret_cast<uint32_t>(list));
+      arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
       soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
           arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
     }
-    *cleared = nullptr;
+    cleared_references_.Clear();
   }
 }
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 52343bc..08bec99 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -31,6 +31,7 @@
 #include "jni.h"
 #include "locks.h"
 #include "offsets.h"
+#include "reference_queue.h"
 #include "root_visitor.h"
 #include "safe_map.h"
 #include "thread_pool.h"
@@ -289,32 +290,26 @@
                            MemberOffset reference_queueNext_offset,
                            MemberOffset reference_pendingNext_offset,
                            MemberOffset finalizer_reference_zombie_offset);
-
-  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* GetReferenceReferent(mirror::Object* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Returns true if the reference object has not yet been enqueued.
-  bool IsEnqueuable(const mirror::Object* ref);
-  void EnqueueReference(mirror::Object* ref, mirror::Object** list)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsEnqueued(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* DequeuePendingReference(mirror::Object** list)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  MemberOffset GetReferencePendingNextOffset() {
-    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
+  MemberOffset GetReferenceReferentOffset() const {
+    return reference_referent_offset_;
+  }
+  MemberOffset GetReferenceQueueOffset() const {
+    return reference_queue_offset_;
+  }
+  MemberOffset GetReferenceQueueNextOffset() const {
+    return reference_queueNext_offset_;
+  }
+  MemberOffset GetReferencePendingNextOffset() const {
     return reference_pendingNext_offset_;
   }
-
-  MemberOffset GetFinalizerReferenceZombieOffset() {
-    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
+  MemberOffset GetFinalizerReferenceZombieOffset() const {
     return finalizer_reference_zombie_offset_;
   }
+  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
+  void ProcessReferences(TimingLogger& timings, bool clear_soft, RootVisitor* is_marked_callback,
+                         RootVisitor* recursive_mark_object_callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Enable verification of object references when the runtime is sufficiently initialized.
   void EnableObjectValidation() {
@@ -460,22 +455,6 @@
     return large_object_space_;
   }
 
-  Mutex* GetSoftRefQueueLock() {
-    return soft_ref_queue_lock_;
-  }
-
-  Mutex* GetWeakRefQueueLock() {
-    return weak_ref_queue_lock_;
-  }
-
-  Mutex* GetFinalizerRefQueueLock() {
-    return finalizer_ref_queue_lock_;
-  }
-
-  Mutex* GetPhantomRefQueueLock() {
-    return phantom_ref_queue_lock_;
-  }
-
   void DumpSpaces(std::ostream& stream = LOG(INFO));
 
   // GC performance measuring
@@ -575,8 +554,20 @@
   bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow);
 
   // Pushes a list of cleared references out to the managed heap.
-  void EnqueueClearedReferences(mirror::Object** cleared_references);
-
+  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetReferenceReferent(mirror::Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ClearReferenceReferent(mirror::Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetReferenceReferent(reference, nullptr);
+  }
+  void EnqueueClearedReferences();
+  // Returns true if the reference object has not yet been enqueued.
+  bool IsEnqueuable(const mirror::Object* ref) const;
+  bool IsEnqueued(mirror::Object* ref) const;
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
+                              void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Print a reference queue.
   void PrintReferenceQueue(std::ostream& os, mirror::Object** queue);
 
@@ -699,12 +690,12 @@
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
 
-  // Mutexes held when adding references to reference queues.
-  // TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
-  Mutex* soft_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  Mutex* weak_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  Mutex* finalizer_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  Mutex* phantom_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Reference queues.
+  ReferenceQueue soft_reference_queue_;
+  ReferenceQueue weak_reference_queue_;
+  ReferenceQueue finalizer_reference_queue_;
+  ReferenceQueue phantom_reference_queue_;
+  ReferenceQueue cleared_references_;
 
   // True while the garbage collector is running.
   volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);
@@ -819,16 +810,12 @@
 
   // offset of java.lang.ref.Reference.referent
   MemberOffset reference_referent_offset_;
-
   // offset of java.lang.ref.Reference.queue
   MemberOffset reference_queue_offset_;
-
   // offset of java.lang.ref.Reference.queueNext
   MemberOffset reference_queueNext_offset_;
-
   // offset of java.lang.ref.Reference.pendingNext
   MemberOffset reference_pendingNext_offset_;
-
   // offset of java.lang.ref.FinalizerReference.zombie
   MemberOffset finalizer_reference_zombie_offset_;
 
@@ -861,6 +848,7 @@
 
   friend class collector::MarkSweep;
   friend class collector::SemiSpace;
+  friend class ReferenceQueue;
   friend class VerifyReferenceCardVisitor;
   friend class VerifyReferenceVisitor;
   friend class VerifyObjectVisitor;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
new file mode 100644
index 0000000..d006349
--- /dev/null
+++ b/runtime/gc/reference_queue.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference_queue.h"
+
+#include "accounting/card_table-inl.h"
+#include "heap.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+namespace gc {
+
+ReferenceQueue::ReferenceQueue(Heap* heap)
+    : lock_("reference queue lock"),
+      heap_(heap),
+      list_(nullptr) {
+}
+
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) {
+  DCHECK(ref != nullptr);
+  MutexLock mu(self, lock_);
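+  // Holding lock_ makes the IsEnqueued check and the enqueue atomic with respect to other threads.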
+  if (!heap_->IsEnqueued(ref)) {
+    EnqueuePendingReference(ref);
+  }
+}
+
+void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
+  CHECK(heap_->IsEnqueuable(ref));
+  EnqueuePendingReference(ref);
+}
+
+void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) {
+  DCHECK(ref != nullptr);
+  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
+  DCHECK_NE(pending_next_offset.Uint32Value(), 0U);
+  if (IsEmpty()) {
+    // 1 element cyclic queue, i.e. Reference ref = ..; ref.pendingNext = ref;
+    ref->SetFieldObject(pending_next_offset, ref, false);
+    list_ = ref;
+  } else {
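+    // The list is circular with list_ as the tail; insert ref as the new head.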
+    mirror::Object* head =
+        list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+    ref->SetFieldObject(pending_next_offset, head, false);
+    list_->SetFieldObject(pending_next_offset, ref, false);
+  }
+}
+
+mirror::Object* ReferenceQueue::DequeuePendingReference() {
+  DCHECK(!IsEmpty());
+  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
+  mirror::Object* head = list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+  DCHECK(head != nullptr);
+  mirror::Object* ref;
+  // Note: the following code is thread-safe because it is only called from ProcessReferences which
+  // is single threaded.
+  if (list_ == head) {
+    ref = list_;
+    list_ = nullptr;
+  } else {
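+    // Unlink the head by pointing the tail (list_) at the head's successor.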
+    mirror::Object* next = head->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+    list_->SetFieldObject(pending_next_offset, next, false);
+    ref = head;
+  }
+  ref->SetFieldObject(pending_next_offset, nullptr, false);
+  return ref;
+}
+
+void ReferenceQueue::Dump(std::ostream& os) const {
+  mirror::Object* cur = list_;
+  os << "Reference starting at list_=" << list_ << "\n";
+  while (cur != nullptr) {
+    mirror::Object* pending_next =
+        cur->GetFieldObject<mirror::Object*>(heap_->GetReferencePendingNextOffset(), false);
+    os << "PendingNext=" << pending_next;
+    if (cur->GetClass()->IsFinalizerReferenceClass()) {
+      os << " Zombie=" <<
+          cur->GetFieldObject<mirror::Object*>(heap_->GetFinalizerReferenceZombieOffset(), false);
+    }
+    os << "\n";
+    cur = pending_next;
+  }
+}
+
+void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor,
+                                          void* arg) {
+  while (!IsEmpty()) {
+    mirror::Object* ref = DequeuePendingReference();
+    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      mirror::Object* forward_address = visitor(referent, arg);
+      if (forward_address == nullptr) {
+        // Referent is white, clear it.
+        heap_->ClearReferenceReferent(ref);
+        if (heap_->IsEnqueuable(ref)) {
+          cleared_references.EnqueuePendingReference(ref);
+        }
+      } else if (referent != forward_address) {
+        // Object moved, need to update the referent.
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+}
+
+void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
+                                                RootVisitor is_marked_callback,
+                                                RootVisitor recursive_mark_callback, void* arg) {
+  while (!IsEmpty()) {
+    mirror::Object* ref = DequeuePendingReference();
+    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      mirror::Object* forward_address = is_marked_callback(referent, arg);
+      // If the referent isn't marked, mark it and update the referent field.
+      if (forward_address == nullptr) {
+        forward_address = recursive_mark_callback(referent, arg);
+        // If the referent is non-null the reference must be enqueuable.
+        DCHECK(heap_->IsEnqueuable(ref));
+        // Move the updated referent to the zombie field.
+        ref->SetFieldObject(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+        heap_->ClearReferenceReferent(ref);
+        cleared_references.EnqueueReference(ref);
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+}
+
+void ReferenceQueue::PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg) {
+  ReferenceQueue cleared(heap_);
+  while (!IsEmpty()) {
+    mirror::Object* ref = DequeuePendingReference();
+    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      mirror::Object* forward_address = preserve_callback(referent, arg);
+      if (forward_address == nullptr) {
+        // Either the reference isn't marked or we don't wish to preserve it.
+        cleared.EnqueuePendingReference(ref);
+      } else {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
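+  // Any references that were not preserved remain queued so ClearWhiteReferences can clear them.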
+  list_ = cleared.GetList();
+}
+
+}  // namespace gc
+}  // namespace art
+
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
new file mode 100644
index 0000000..89589c3
--- /dev/null
+++ b/runtime/gc/reference_queue.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
+#define ART_RUNTIME_GC_REFERENCE_QUEUE_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "atomic_integer.h"
+#include "base/timing_logger.h"
+#include "globals.h"
+#include "gtest/gtest.h"
+#include "jni.h"
+#include "locks.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "thread_pool.h"
+
+namespace art {
+namespace gc {
+
+class Heap;
+
+// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
+// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
+// java.lang.ref.Reference objects.
+class ReferenceQueue {
+ public:
+  explicit ReferenceQueue(Heap* heap);
+  // Enqueues a reference if it is not already enqueued. Thread safe to call from multiple threads
+  // since it uses a lock to avoid a race between checking for the reference's presence and
+  // adding it.
+  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+  // Enqueues a reference; unlike EnqueuePendingReference, it checks that the reference is
+  // enqueuable. Not thread safe; used when mutators are paused to minimize lock
+  // overhead.
+  void EnqueueReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
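+  // Enqueues a reference without checking that it is enqueuable.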
+  void EnqueuePendingReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
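+  // Removes and returns the head of the queue; only called single threaded.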
+  mirror::Object* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Enqueues finalizer references with white referents.  White referents are blackened, moved to the
+  // zombie field, and the referent field is cleared.
+  void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
+                                  RootVisitor is_marked_callback,
+                                  RootVisitor recursive_mark_callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Walks the reference list marking any references subject to the reference clearing policy.
+  // References with a black referent are removed from the list.  References with white referents
+  // biased toward saving are blackened and also removed from the list.
+  void PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Unlinks the reference list, clearing reference objects with white referents.  Cleared references
+  // registered to a reference queue are scheduled for appending by the heap worker thread.
+  void ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) const
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsEmpty() const {
+    return list_ == nullptr;
+  }
+  void Clear() {
+    list_ = nullptr;
+  }
+  mirror::Object* GetList() {
+    return list_;
+  }
+
+ private:
+  // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
+  // calling AtomicEnqueueIfNotEnqueued.
+  Mutex lock_;
+  // The heap contains the reference offsets.
+  Heap* const heap_;
+  // The actual reference list. Not a root since it will be nullptr when the GC is not running.
+  mirror::Object* list_;
+};
+
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_