Refactor reference code into mirror namespace.

Added two new files: mirror/reference.h and mirror/reference-inl.h.
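
Reference field offsets were previously registered with gc::Heap via
SetReferenceOffsets() and read back one field at a time. They are now
exposed directly by mirror::Reference and mirror::FinalizerReference,
and ReferenceQueue stores mirror::Reference* instead of mirror::Object*,
so it no longer needs a Heap pointer.

A minimal sketch of the resulting GC-side usage (names taken from this
change):

  mirror::Reference* ref = obj->AsReference();
  mirror::Object* referent = ref->GetReferent();
  if (referent == nullptr && ref->IsEnqueuable()) {
    // Cleared and associated with a queue: hand it off for enqueuing.
    cleared_references.EnqueuePendingReference(ref);
  }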

Change-Id: Ibe3ff6379aef7096ff130594535b7f7c0b7dabce
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7a54bb1..cef9954 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -504,13 +504,6 @@
   CHECK_STREQ(fh.GetName(), "zombie");
   CHECK_STREQ(fh.GetTypeDescriptor(), "Ljava/lang/Object;");
 
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->SetReferenceOffsets(referent->GetOffset(),
-                            queue->GetOffset(),
-                            queueNext->GetOffset(),
-                            pendingNext->GetOffset(),
-                            zombie->GetOffset());
-
   // ensure all class_roots_ are initialized
   for (size_t i = 0; i < kClassRootsMax; i++) {
     ClassRoot class_root = static_cast<ClassRoot>(i);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d0555ff..7eb7b01 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -32,6 +32,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/proxy.h"
+#include "mirror/reference.h"
 #include "mirror/stack_trace_element.h"
 #include "sirt_ref.h"
 
@@ -624,6 +625,25 @@
   };
 };
 
+struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
+  ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
+    // alphabetical references
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_),  "pendingNext"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_),         "queue"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_),    "queueNext"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, referent_),      "referent"));
+  };
+};
+
+struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
+  FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
+    // alphabetical references
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_),   "next"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_),   "prev"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
+  };
+};
+
 // C++ fields must exactly match the fields in the Java classes. If this fails,
 // reorder the fields in the C++ class. Managed class fields are ordered by
 // ClassLinker::LinkFields.
@@ -639,6 +659,8 @@
   EXPECT_TRUE(ClassLoaderOffsets().Check());
   EXPECT_TRUE(ProxyOffsets().Check());
   EXPECT_TRUE(DexCacheOffsets().Check());
+  EXPECT_TRUE(ReferenceOffsets().Check());
+  EXPECT_TRUE(FinalizerReferenceOffsets().Check());
 
   EXPECT_TRUE(ClassClassOffsets().Check());
   EXPECT_TRUE(StringClassOffsets().Check());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8b9f60e..4f3ad32 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -44,6 +44,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
@@ -1189,9 +1190,7 @@
 // the heap for later processing.
 void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
   DCHECK(klass != nullptr);
-  DCHECK(klass->IsReferenceClass());
-  DCHECK(obj != NULL);
-  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
+  heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
 }
 
 class MarkObjectVisitor {
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 89694d4..23b155c 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -44,6 +44,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
@@ -633,7 +634,7 @@
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
-  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
+  heap_->DelayReferenceReferent(klass, obj->AsReference(), MarkedForwardingAddressCallback, this);
 }
 
 class SemiSpaceMarkObjectVisitor {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 13dd90e..76b94fd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -54,6 +54,7 @@
 #include "mirror/object.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/reference-inl.h"
 #include "object_utils.h"
 #include "os.h"
 #include "runtime.h"
@@ -103,11 +104,6 @@
       ignore_max_footprint_(ignore_max_footprint),
       have_zygote_space_(false),
       large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
-      soft_reference_queue_(this),
-      weak_reference_queue_(this),
-      finalizer_reference_queue_(this),
-      phantom_reference_queue_(this),
-      cleared_references_(this),
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -144,11 +140,6 @@
       current_non_moving_allocator_(kAllocatorTypeNonMoving),
       bump_pointer_space_(nullptr),
       temp_space_(nullptr),
-      reference_referent_offset_(0),
-      reference_queue_offset_(0),
-      reference_queueNext_offset_(0),
-      reference_pendingNext_offset_(0),
-      finalizer_reference_zombie_offset_(0),
       min_free_(min_free),
       max_free_(max_free),
       target_utilization_(target_utilization),
@@ -792,29 +783,12 @@
   timings.EndSplit();
 }
 
-bool Heap::IsEnqueued(mirror::Object* ref) const {
-  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
-  // will always be non-null.
-  return ref->GetFieldObject<mirror::Object>(GetReferencePendingNextOffset(), false) != nullptr;
-}
-
-bool Heap::IsEnqueuable(mirror::Object* ref) const {
-  DCHECK(ref != nullptr);
-  const mirror::Object* queue =
-      ref->GetFieldObject<mirror::Object>(GetReferenceQueueOffset(), false);
-  const mirror::Object* queue_next =
-      ref->GetFieldObject<mirror::Object>(GetReferenceQueueNextOffset(), false);
-  return queue != nullptr && queue_next == nullptr;
-}
-
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                   IsMarkedCallback is_marked_callback, void* arg) {
-  DCHECK(klass != nullptr);
-  DCHECK(klass->IsReferenceClass());
-  DCHECK(obj != nullptr);
-  mirror::Object* referent = GetReferenceReferent(obj);
+  DCHECK_EQ(klass, ref->GetClass());
+  mirror::Object* referent = ref->GetReferent();
   if (referent != nullptr) {
     mirror::Object* forward_address = is_marked_callback(referent, arg);
     // Null means that the object is not currently marked.
@@ -824,20 +798,20 @@
       // We need to check that the references haven't already been enqueued since we can end up
       // scanning the same reference multiple times due to dirty cards.
       if (klass->IsSoftReferenceClass()) {
-        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsWeakReferenceClass()) {
-        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsFinalizerReferenceClass()) {
-        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsPhantomReferenceClass()) {
-        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else {
         LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                    << klass->GetAccessFlags();
       }
     } else if (referent != forward_address) {
       // Referent is already marked and we need to update it.
-      SetReferenceReferent(obj, forward_address);
+      ref->SetReferent<false>(forward_address);
     }
   }
 }
@@ -2013,8 +1987,9 @@
     VerifyReferenceVisitor visitor(heap_);
    // The class doesn't count as a reference but we should verify it anyway.
     collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
-    if (obj->GetClass()->IsReferenceClass()) {
-      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
+    if (obj->IsReferenceInstance()) {
+      mirror::Reference* ref = obj->AsReference();
+      visitor(obj, ref->GetReferent(), mirror::Reference::ReferentOffset(), false);
     }
     failed_ = failed_ || visitor.Failed();
   }
@@ -2476,35 +2451,6 @@
   non_moving_space_->ClearGrowthLimit();
 }
 
-void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
-                               MemberOffset reference_queue_offset,
-                               MemberOffset reference_queueNext_offset,
-                               MemberOffset reference_pendingNext_offset,
-                               MemberOffset finalizer_reference_zombie_offset) {
-  reference_referent_offset_ = reference_referent_offset;
-  reference_queue_offset_ = reference_queue_offset;
-  reference_queueNext_offset_ = reference_queueNext_offset;
-  reference_pendingNext_offset_ = reference_pendingNext_offset;
-  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
-  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
-  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
-}
-
-void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
-  DCHECK(reference != NULL);
-  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  reference->SetFieldObject<false, false>(reference_referent_offset_, referent, true);
-}
-
-mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
-  DCHECK(reference != NULL);
-  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  return reference->GetFieldObject<mirror::Object>(reference_referent_offset_, true);
-}
-
 void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
   ScopedObjectAccess soa(self);
   JValue result;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 12c55c4..1e0a596 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -312,26 +312,6 @@
     return discontinuous_spaces_;
   }
 
-  void SetReferenceOffsets(MemberOffset reference_referent_offset,
-                           MemberOffset reference_queue_offset,
-                           MemberOffset reference_queueNext_offset,
-                           MemberOffset reference_pendingNext_offset,
-                           MemberOffset finalizer_reference_zombie_offset);
-  MemberOffset GetReferenceReferentOffset() const {
-    return reference_referent_offset_;
-  }
-  MemberOffset GetReferenceQueueOffset() const {
-    return reference_queue_offset_;
-  }
-  MemberOffset GetReferenceQueueNextOffset() const {
-    return reference_queueNext_offset_;
-  }
-  MemberOffset GetReferencePendingNextOffset() const {
-    return reference_pendingNext_offset_;
-  }
-  MemberOffset GetFinalizerReferenceZombieOffset() const {
-    return finalizer_reference_zombie_offset_;
-  }
   static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
   void ProcessReferences(TimingLogger& timings, bool clear_soft,
                          IsMarkedCallback* is_marked_callback,
@@ -624,20 +604,8 @@
   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Pushes a list of cleared references out to the managed heap.
-  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* GetReferenceReferent(mirror::Object* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ClearReferenceReferent(mirror::Object* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetReferenceReferent(reference, nullptr);
-  }
   void EnqueueClearedReferences();
-  // Returns true if the reference object has not yet been enqueued.
-  bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               IsMarkedCallback is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -918,17 +887,6 @@
   // Temp space is the space which the semispace collector copies to.
   space::BumpPointerSpace* temp_space_;
 
-  // offset of java.lang.ref.Reference.referent
-  MemberOffset reference_referent_offset_;
-  // offset of java.lang.ref.Reference.queue
-  MemberOffset reference_queue_offset_;
-  // offset of java.lang.ref.Reference.queueNext
-  MemberOffset reference_queueNext_offset_;
-  // offset of java.lang.ref.Reference.pendingNext
-  MemberOffset reference_pendingNext_offset_;
-  // offset of java.lang.ref.FinalizerReference.zombie
-  MemberOffset finalizer_reference_zombie_offset_;
-
   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
   // utilization, regardless of target utilization ratio.
   size_t min_free_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 203701f..aee7891 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -20,91 +20,86 @@
 #include "heap.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
 
 namespace art {
 namespace gc {
 
-ReferenceQueue::ReferenceQueue(Heap* heap)
+ReferenceQueue::ReferenceQueue()
     : lock_("reference queue lock"),
-      heap_(heap),
       list_(nullptr) {
 }
 
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
   DCHECK(ref != NULL);
   MutexLock mu(self, lock_);
-  if (!heap_->IsEnqueued(ref)) {
+  if (!ref->IsEnqueued()) {
     EnqueuePendingReference(ref);
   }
 }
 
-void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
-  CHECK(heap_->IsEnqueuable(ref));
+void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+  CHECK(ref->IsEnqueuable());
   EnqueuePendingReference(ref);
 }
 
-void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) {
+void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
   DCHECK(ref != NULL);
-  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
-  DCHECK_NE(pending_next_offset.Uint32Value(), 0U);
   if (IsEmpty()) {
     // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
-    if (Runtime::Current()->IsActiveTransaction()) {
-      ref->SetFieldObject<true>(pending_next_offset, ref, false);
-    } else {
-      ref->SetFieldObject<false>(pending_next_offset, ref, false);
-    }
     list_ = ref;
   } else {
-    mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
+    mirror::Reference* head = list_->GetPendingNext();
     if (Runtime::Current()->IsActiveTransaction()) {
-      ref->SetFieldObject<true>(pending_next_offset, head, false);
-      list_->SetFieldObject<true>(pending_next_offset, ref, false);
+      ref->SetPendingNext<true>(head);
     } else {
-      ref->SetFieldObject<false>(pending_next_offset, head, false);
-      list_->SetFieldObject<false>(pending_next_offset, ref, false);
+      ref->SetPendingNext<false>(head);
     }
   }
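+  // Splice the new reference into the cyclic list. When the queue was empty, list_ == ref at
+  // this point, so this store makes ref its own pendingNext (a one-element cycle).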
+  if (Runtime::Current()->IsActiveTransaction()) {
+    list_->SetPendingNext<true>(ref);
+  } else {
+    list_->SetPendingNext<false>(ref);
+  }
 }
 
-mirror::Object* ReferenceQueue::DequeuePendingReference() {
+mirror::Reference* ReferenceQueue::DequeuePendingReference() {
   DCHECK(!IsEmpty());
-  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
-  mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
+  mirror::Reference* head = list_->GetPendingNext();
   DCHECK(head != nullptr);
-  mirror::Object* ref;
+  mirror::Reference* ref;
   // Note: the following code is thread-safe because it is only called from ProcessReferences which
   // is single threaded.
   if (list_ == head) {
     ref = list_;
     list_ = nullptr;
   } else {
-    mirror::Object* next = head->GetFieldObject<mirror::Object>(pending_next_offset, false);
+    mirror::Reference* next = head->GetPendingNext();
     if (Runtime::Current()->IsActiveTransaction()) {
-      list_->SetFieldObject<true>(pending_next_offset, next, false);
+      list_->SetPendingNext<true>(next);
     } else {
-      list_->SetFieldObject<false>(pending_next_offset, next, false);
+      list_->SetPendingNext<false>(next);
     }
     ref = head;
   }
   if (Runtime::Current()->IsActiveTransaction()) {
-    ref->SetFieldObject<true>(pending_next_offset, nullptr, false);
+    ref->SetPendingNext<true>(nullptr);
   } else {
-    ref->SetFieldObject<false>(pending_next_offset, nullptr, false);
+    ref->SetPendingNext<false>(nullptr);
   }
   return ref;
 }
 
 void ReferenceQueue::Dump(std::ostream& os) const {
-  mirror::Object* cur = list_;
+  mirror::Reference* cur = list_;
   os << "Reference starting at list_=" << list_ << "\n";
   while (cur != nullptr) {
-    mirror::Object* pending_next =
-        cur->GetFieldObject<mirror::Object>(heap_->GetReferencePendingNextOffset(), false);
+    mirror::Reference* pending_next = cur->GetPendingNext();
     os << "PendingNext=" << pending_next;
-    if (cur->GetClass()->IsFinalizerReferenceClass()) {
-      os << " Zombie=" <<
-          cur->GetFieldObject<mirror::Object>(heap_->GetFinalizerReferenceZombieOffset(), false);
+    if (cur->IsFinalizerReferenceInstance()) {
+      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
     }
     os << "\n";
     cur = pending_next;
@@ -115,19 +108,23 @@
                                           IsMarkedCallback* preserve_callback,
                                           void* arg) {
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::Reference* ref = DequeuePendingReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = preserve_callback(referent, arg);
       if (forward_address == nullptr) {
         // Referent is white, clear it.
-        heap_->ClearReferenceReferent(ref);
-        if (heap_->IsEnqueuable(ref)) {
+        if (Runtime::Current()->IsActiveTransaction()) {
+          ref->ClearReferent<true>();
+        } else {
+          ref->ClearReferent<false>();
+        }
+        if (ref->IsEnqueuable()) {
           cleared_references.EnqueuePendingReference(ref);
         }
       } else if (referent != forward_address) {
        // Object moved, need to update the referent.
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent<false>(forward_address);
       }
     }
   }
@@ -138,42 +135,43 @@
                                                 MarkObjectCallback recursive_mark_callback,
                                                 void* arg) {
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = is_marked_callback(referent, arg);
      // If the referent isn't marked, mark it and update the referent.
       if (forward_address == nullptr) {
         forward_address = recursive_mark_callback(referent, arg);
        // If the referent is non-null the reference must be enqueuable.
-        DCHECK(heap_->IsEnqueuable(ref));
+        DCHECK(ref->IsEnqueuable());
         // Move the updated referent to the zombie field.
         if (Runtime::Current()->IsActiveTransaction()) {
-          ref->SetFieldObject<true>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+          ref->SetZombie<true>(forward_address);
+          ref->ClearReferent<true>();
         } else {
-          ref->SetFieldObject<false>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+          ref->SetZombie<false>(forward_address);
+          ref->ClearReferent<false>();
         }
-        heap_->ClearReferenceReferent(ref);
         cleared_references.EnqueueReference(ref);
       } else if (referent != forward_address) {
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent<false>(forward_address);
       }
     }
   }
 }
 
 void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
-  ReferenceQueue cleared(heap_);
+  ReferenceQueue cleared;
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::Reference* ref = DequeuePendingReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = preserve_callback(referent, arg);
       if (forward_address == nullptr) {
         // Either the reference isn't marked or we don't wish to preserve it.
         cleared.EnqueuePendingReference(ref);
       } else if (forward_address != referent) {
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent<false>(forward_address);
       }
     }
   }
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 99314ba..8d392ba 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -31,6 +31,10 @@
 #include "thread_pool.h"
 
 namespace art {
+namespace mirror {
+class Reference;
+}  // namespace mirror
+
 namespace gc {
 
 class Heap;
@@ -40,18 +44,18 @@
 // java.lang.ref.Reference objects.
 class ReferenceQueue {
  public:
-  explicit ReferenceQueue(Heap* heap);
+  explicit ReferenceQueue();
   // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple
   // threads since it uses a lock to avoid a race between checking for the reference's
   // presence and adding it.
-  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref)
+  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
   // Enqueue a reference. Unlike EnqueuePendingReference, this checks that the reference is
   // enqueuable. Not thread safe; used when mutators are paused to minimize lock
   // overhead.
-  void EnqueueReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnqueuePendingReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to the
   // zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
@@ -76,7 +80,7 @@
   void Clear() {
     list_ = nullptr;
   }
-  mirror::Object* GetList() {
+  mirror::Reference* GetList() {
     return list_;
   }
 
@@ -84,10 +88,8 @@
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // The heap contains the reference offsets.
-  Heap* const heap_;
   // The actual reference list. Not a root since it will be nullptr when the GC is not running.
-  mirror::Object* list_;
+  mirror::Reference* list_;
 };
 
 }  // namespace gc
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 484c21a..cad1017 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -27,6 +27,7 @@
 #include "lock_word-inl.h"
 #include "monitor.h"
 #include "runtime.h"
+#include "reference.h"
 #include "throwable.h"
 
 namespace art {
@@ -197,6 +198,12 @@
 }
 
 template<VerifyObjectFlags kVerifyFlags>
+inline Reference* Object::AsReference() {
+  DCHECK(IsReferenceInstance<kVerifyFlags>());
+  return down_cast<Reference*>(this);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
 inline Array* Object::AsArray() {
   DCHECK(IsArrayInstance<kVerifyFlags>());
   return down_cast<Array*>(this);
@@ -314,6 +321,12 @@
 }
 
 template<VerifyObjectFlags kVerifyFlags>
+inline FinalizerReference* Object::AsFinalizerReference() {
+  DCHECK(IsFinalizerReferenceInstance<kVerifyFlags>());
+  return down_cast<FinalizerReference*>(this);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::IsPhantomReferenceInstance() {
   return GetClass<kVerifyFlags>()->IsPhantomReferenceClass();
 }
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4e2c624..476259f 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -41,6 +41,7 @@
 class ArtMethod;
 class Array;
 class Class;
+class FinalizerReference;
 template<class T> class ObjectArray;
 template<class T> class PrimitiveArray;
 typedef PrimitiveArray<uint8_t> BooleanArray;
@@ -51,6 +52,7 @@
 typedef PrimitiveArray<int32_t> IntArray;
 typedef PrimitiveArray<int64_t> LongArray;
 typedef PrimitiveArray<int16_t> ShortArray;
+class Reference;
 class String;
 class Throwable;
 
@@ -170,12 +172,16 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Accessor for Java type fields.
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
new file mode 100644
index 0000000..0f76f77
--- /dev/null
+++ b/runtime/mirror/reference-inl.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_REFERENCE_INL_H_
+#define ART_RUNTIME_MIRROR_REFERENCE_INL_H_
+
+#include "reference.h"
+
+namespace art {
+namespace mirror {
+
+inline bool Reference::IsEnqueuable() {
+  // Not using volatile reads as an optimization since this is only called with all the mutators
+  // suspended.
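+  // A reference is enqueuable once the program has associated it with a queue (queue_ != null)
+  // but it has not yet been appended to that queue (queue_next_ == null).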
+  const Object* queue = GetFieldObject<mirror::Object>(QueueOffset(), false);
+  const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset(), false);
+  return queue != nullptr && queue_next == nullptr;
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_REFERENCE_INL_H_
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
new file mode 100644
index 0000000..c2a83ff
--- /dev/null
+++ b/runtime/mirror/reference.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
+#define ART_RUNTIME_MIRROR_REFERENCE_H_
+
+#include "object.h"
+
+namespace art {
+
+struct ReferenceOffsets;
+struct FinalizerReferenceOffsets;
+
+namespace mirror {
+
+// C++ mirror of java.lang.ref.Reference
+class MANAGED Reference : public Object {
+ public:
+  static MemberOffset PendingNextOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
+  }
+  static MemberOffset QueueOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_);
+  }
+  static MemberOffset QueueNextOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_next_);
+  }
+  static MemberOffset ReferentOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
+  }
+
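+  // Note that referent accesses use volatile semantics (the 'true' argument below) since the
+  // GC can clear or update the referent concurrently with managed code reading it.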
+  Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<Object>(ReferentOffset(), true);
+  }
+  template<bool kTransactionActive>
+  void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldObject<kTransactionActive>(ReferentOffset(), referent, true);
+  }
+  template<bool kTransactionActive>
+  void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldObject<kTransactionActive>(ReferentOffset(), nullptr, true);
+  }
+
+  // Volatile read/write is not necessary since pendingNext is only accessed from Java threads
+  // for cleared references. Once these cleared references have a null referent, the GC never
+  // reads their pendingNext again.
+  Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<Reference>(PendingNextOffset(), false);
+  }
+  template<bool kTransactionActive>
+  void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next, false);
+  }
+
+  bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Since the references are stored as cyclic lists it means that once enqueued, the pending
+    // next is always non-null.
+    return GetPendingNext() != nullptr;
+  }
+
+  bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+  HeapReference<Reference> pending_next_;  // Note: this field is volatile in Java.
+  HeapReference<Object> queue_;  // Note: this field is volatile in Java.
+  HeapReference<Reference> queue_next_;  // Note: this field is volatile in Java.
+  HeapReference<Object> referent_;  // Note: this field is volatile in Java.
+
+  friend struct art::ReferenceOffsets;  // for verifying offset information
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
+};
+
+// C++ mirror of java.lang.ref.FinalizerReference
+class MANAGED FinalizerReference : public Reference {
+ public:
+  static MemberOffset ZombieOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(FinalizerReference, zombie_);
+  }
+
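+  // The zombie field keeps the cleared referent reachable until its finalizer has run; the GC
+  // stores the forwarded referent here before enqueuing the reference.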
+  template<bool kTransactionActive>
+  void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldObject<kTransactionActive>(ZombieOffset(), zombie, true);
+  }
+  Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<Object>(ZombieOffset(), true);
+  }
+
+ private:
+  HeapReference<FinalizerReference> next_;
+  HeapReference<FinalizerReference> prev_;
+  HeapReference<Object> zombie_;
+
+  friend struct art::FinalizerReferenceOffsets;  // for verifying offset information
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FinalizerReference);
+};
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_REFERENCE_H_