Fix and optimize VerifyObject.

VerifyObject no longer resides in Heap, so it can now be enabled
for non-debug builds. VerifyStack is still slow, so it is now
guarded by its own flag.
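
A minimal sketch of the standalone check (the mode constants and
VerifyObjectBody appear in the diff below; the exact body and control
flow here are assumptions):

    // Sketch: verification no longer goes through Heap::VerifyObject.
    // kVerifyObjectSupport is a compile-time constant, so this whole
    // function folds away when verification is disabled.
    inline void VerifyObject(mirror::Object* obj) {
      if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
        if (kVerifyObjectSupport > kVerifyObjectModeFast) {
          // Slow mode: full check through the heap.
          Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
        } else if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
          // Fast mode: cheap alignment-only sanity check.
          LOG(FATAL) << "Object isn't aligned: " << obj;
        }
      }
    }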

Fixed the image writer to skip verification in the places where
verification fails due to invalid reads.

Fixed RosAlloc to use SizeOf<kVerifyNone>, which doesn't verify the
object.

Added a flags parameter to some of the mirror getters / setters so
that VerifyObject can be selectively disabled on certain calls.
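
For example, a single read can opt out per call site (usage sketch;
the default value of the flags template parameter is an assumption):

    // Verified read (default flags).
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    // The same read with verification disabled for this call only.
    mirror::Object* raw =
        obj->GetFieldObject<mirror::Object, kVerifyNone>(offset, false);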

Optimized the GC to not verify each object multiple times during
object scanning when verify object is enabled.
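
The pattern, as in the mark_sweep-inl.h hunk below: the initial
GetClass call verifies the object once, and follow-up queries pass
kVerifyNone so it isn't re-verified:

    mirror::Class* klass = obj->GetClass();  // Verifies obj.
    if (klass->IsObjectArrayClass<kVerifyNone>()) {  // Already verified.
      VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(),
                                 visitor);
    }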

Added three verification options: verify reads, verify this, and
verify writes, so that the amount of verification done by the
mirror getters and setters can be chosen per call site.
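
A sketch of how the options might be declared (kVerifyNone and
kVerifyReads appear in the diff below; the remaining names and the
bitmask layout are assumptions):

    enum VerifyObjectFlags {
      kVerifyNone   = 0x0,  // No verification.
      kVerifyThis   = 0x1,  // Verify the object an operation runs on.
      kVerifyReads  = 0x2,  // Verify references read out of objects.
      kVerifyWrites = 0x4,  // Verify references written into objects.
      kVerifyAll    = kVerifyThis | kVerifyReads | kVerifyWrites,
    };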

Removed some useless DCHECKs which slowed down debug builds without
providing any benefit.

TODO: RosAlloc verification doesn't currently work with
VerifyObject.

Bug: 12934910
Bug: 12879358

Change-Id: Ic61033104dfc334543f89b0fc0ad8cd4f4015d69
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index d148ae5..4915532 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -30,19 +30,19 @@
 
 template <typename MarkVisitor>
 inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor) {
-  DCHECK(obj != NULL);
   if (kIsDebugBuild && !IsMarked(obj)) {
     heap_->DumpSpaces();
     LOG(FATAL) << "Scanning unmarked object " << obj;
   }
+  // The GetClass verifies the object, don't need to reverify after.
   mirror::Class* klass = obj->GetClass();
-  DCHECK(klass != NULL);
+  // IsArrayClass verifies klass.
   if (UNLIKELY(klass->IsArrayClass())) {
     if (kCountScannedTypes) {
       ++array_count_;
     }
-    if (klass->IsObjectArrayClass()) {
-      VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+    if (klass->IsObjectArrayClass<kVerifyNone>()) {
+      VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
     }
   } else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
     if (kCountScannedTypes) {
@@ -54,7 +54,7 @@
       ++other_count_;
     }
     VisitOtherReferences(klass, obj, visitor);
-    if (UNLIKELY(klass->IsReferenceClass())) {
+    if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
       DelayReferenceReferent(klass, obj);
     }
   }
@@ -65,24 +65,19 @@
                                              bool visit_class)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                           Locks::mutator_lock_) {
-  DCHECK(obj != NULL);
-  DCHECK(obj->GetClass() != NULL);
   mirror::Class* klass = obj->GetClass();
-  DCHECK(klass != NULL);
-  if (klass == mirror::Class::GetJavaLangClass()) {
-    DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
+  if (klass->IsArrayClass()) {
+    if (visit_class) {
+      visitor(obj, klass, mirror::Object::ClassOffset(), false);
+    }
+    if (klass->IsObjectArrayClass<kVerifyNone>()) {
+      VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
+    }
+  } else if (klass == mirror::Class::GetJavaLangClass()) {
+    DCHECK_EQ(klass->GetClass<kVerifyNone>(), mirror::Class::GetJavaLangClass());
     VisitClassReferences(klass, obj, visitor);
   } else {
-    if (klass->IsArrayClass()) {
-      if (visit_class) {
-        visitor(obj, klass, mirror::Object::ClassOffset(), false);
-      }
-      if (klass->IsObjectArrayClass()) {
-        VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
-      }
-    } else {
-      VisitOtherReferences(klass, obj, visitor);
-    }
+    VisitOtherReferences(klass, obj, visitor);
   }
 }
 
@@ -90,9 +85,7 @@
 inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                                      const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-  DCHECK(obj != NULL);
-  DCHECK(klass != NULL);
-  VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
+  VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets<kVerifyNone>(), false, visitor);
 }
 
 template <typename Visitor>
@@ -100,14 +93,13 @@
                                             const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   VisitInstanceFieldsReferences(klass, obj, visitor);
-  VisitStaticFieldsReferences(obj->AsClass(), visitor);
+  VisitStaticFieldsReferences(obj->AsClass<kVerifyNone>(), visitor);
 }
 
 template <typename Visitor>
 inline void MarkSweep::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-  DCHECK(klass != NULL);
-  VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
+  VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets<kVerifyNone>(), true, visitor);
 }
 
 template <typename Visitor>
@@ -118,7 +110,7 @@
     while (ref_offsets != 0) {
       size_t right_shift = CLZ(ref_offsets);
       MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
-      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
+      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
       visitor(obj, ref, field_offset, is_static);
       ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
     }
@@ -127,7 +119,7 @@
     // walk up the class inheritance hierarchy and find reference
     // offsets the hard way. In the static case, just consider this
     // class.
-    for (mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+    for (mirror::Class* klass = is_static ? obj->AsClass<kVerifyNone>() : obj->GetClass<kVerifyNone>();
          klass != nullptr;
          klass = is_static ? nullptr : klass->GetSuperClass()) {
       size_t num_reference_fields = (is_static
@@ -137,7 +129,7 @@
         mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
                                              : klass->GetInstanceField(i));
         MemberOffset field_offset = field->GetOffset();
-        mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
+        mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
         visitor(obj, ref, field_offset, is_static);
       }
     }
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7b9d675..fb797e0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -533,15 +533,11 @@
 
 void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                          RootType /*root_type*/) {
-  DCHECK(root != NULL);
-  DCHECK(arg != NULL);
   reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
 }
 
 void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                  RootType /*root_type*/) {
-  DCHECK(root != nullptr);
-  DCHECK(arg != nullptr);
   reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
 }
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 882867b..fe8c253 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -666,11 +666,11 @@
       // case since it does not dirty cards and use additional memory.
       // Since we do not change the actual object, we can safely use non-transactional mode. Also
       // disable check as we could run inside a transaction.
-      obj->SetFieldObjectWithoutWriteBarrier<false, false>(offset, new_address, false);
+      obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
     }
   }, kMovingClasses);
-  mirror::Class* klass = obj->GetClass();
-  if (UNLIKELY(klass->IsReferenceClass())) {
+  mirror::Class* klass = obj->GetClass<kVerifyNone>();
+  if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
     DelayReferenceReferent(klass, obj);
   }
 }
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9c91b0e..3d591f0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -28,6 +28,7 @@
 #include "runtime.h"
 #include "thread.h"
 #include "thread-inl.h"
+#include "verify_object-inl.h"
 
 namespace art {
 namespace gc {
@@ -98,12 +99,8 @@
   if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
     CheckConcurrentGC(self, new_num_bytes_allocated, obj);
   }
-  if (kIsDebugBuild) {
-    if (kDesiredHeapVerification > kNoHeapVerification) {
-      VerifyObject(obj);
-    }
-    self->VerifyStack();
-  }
+  VerifyObject(obj);
+  self->VerifyStack();
   return obj;
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8c89cdc..b970df3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -135,7 +135,7 @@
        * searching.
        */
       max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
-          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
       current_allocator_(kAllocatorTypeDlMalloc),
       current_non_moving_allocator_(kAllocatorTypeNonMoving),
       bump_pointer_space_(nullptr),
@@ -150,7 +150,7 @@
       target_utilization_(target_utilization),
       total_wait_time_(0),
       total_allocation_time_(0),
-      verify_object_mode_(kHeapVerificationNotPermitted),
+      verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
       running_on_valgrind_(RUNNING_ON_VALGRIND),
       use_tlab_(use_tlab) {
@@ -314,9 +314,7 @@
 
 bool Heap::IsCompilingBoot() const {
   for (const auto& space : continuous_spaces_) {
-    if (space->IsImageSpace()) {
-      return false;
-    } else if (space->IsZygoteSpace()) {
+    if (space->IsImageSpace() || space->IsZygoteSpace()) {
       return false;
     }
   }
@@ -823,14 +821,16 @@
     return false;
   }
   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
-    mirror::Class* klass = obj->GetClass();
+    mirror::Class* klass = obj->GetClass<kVerifyNone>();
     if (obj == klass) {
       // This case happens for java.lang.Class.
       return true;
     }
     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
-    return false;
+    // If we are in the allocated region of the temp space, then we are probably live (e.g. during
+    // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
+    return temp_space_->Contains(obj);
   }
   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
   space::DiscontinuousSpace* d_space = NULL;
@@ -886,25 +886,6 @@
   return false;
 }
 
-void Heap::VerifyObjectImpl(mirror::Object* obj) {
-  if (Thread::Current() == NULL ||
-      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
-    return;
-  }
-  VerifyObjectBody(obj);
-}
-
-bool Heap::VerifyClassClass(const mirror::Class* c) const {
-  // Note: we don't use the accessors here as they have internal sanity checks that we don't want
-  // to run
-  const byte* raw_addr =
-      reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
-  mirror::Class* c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
-  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
-  mirror::Class* c_c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
-  return c_c == c_c_c;
-}
-
 void Heap::DumpSpaces(std::ostream& stream) {
   for (const auto& space : continuous_spaces_) {
     accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -923,36 +904,30 @@
 }
 
 void Heap::VerifyObjectBody(mirror::Object* obj) {
-  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+  if (this == nullptr && verify_object_mode_ == kVerifyObjectModeDisabled) {
+    return;
+  }
   // Ignore early dawn of the universe verifications.
   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
     return;
   }
-  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
-      mirror::Object::ClassOffset().Int32Value();
-  mirror::Class* c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
-  if (UNLIKELY(c == NULL)) {
-    LOG(FATAL) << "Null class in object: " << obj;
-  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
-    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
-  }
+  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+  mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(
+      mirror::Object::ClassOffset(), false);
+  CHECK(c != nullptr) << "Null class in object " << obj;
+  CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
   CHECK(VerifyClassClass(c));
 
-  if (verify_object_mode_ > kVerifyAllFast) {
-    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
-    //       heap_bitmap_lock_.
+  if (verify_object_mode_ > kVerifyObjectModeFast) {
+    // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
     if (!IsLiveObjectLocked(obj)) {
       DumpSpaces();
       LOG(FATAL) << "Object is dead: " << obj;
     }
-    if (!IsLiveObjectLocked(c)) {
-      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
-    }
   }
 }
 
 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
-  DCHECK(obj != NULL);
   reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
 }
 
@@ -1790,7 +1765,7 @@
 
       if (bitmap == nullptr) {
         LOG(ERROR) << "Object " << obj << " has no bitmap";
-        if (!heap_->VerifyClassClass(obj->GetClass())) {
+        if (!VerifyClassClass(obj->GetClass())) {
           LOG(ERROR) << "Object " << obj << " failed class verification!";
         }
       } else {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 21a2365..83202a5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -37,6 +37,7 @@
 #include "reference_queue.h"
 #include "safe_map.h"
 #include "thread_pool.h"
+#include "verify_object.h"
 
 namespace art {
 
@@ -99,15 +100,6 @@
   kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
 };
 
-// How we want to sanity check the heap's correctness.
-enum HeapVerificationMode {
-  kHeapVerificationNotPermitted,  // Too early in runtime start-up for heap to be verified.
-  kNoHeapVerification,  // Production default.
-  kVerifyAllFast,  // Sanity check all heap accesses with quick(er) tests.
-  kVerifyAll  // Sanity check all heap accesses.
-};
-static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
-
 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
 static constexpr bool kUseRosAlloc = true;
 
@@ -208,14 +200,9 @@
   void ChangeCollector(CollectorType collector_type);
 
   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
-  void VerifyObjectImpl(mirror::Object* o);
-  void VerifyObject(mirror::Object* o) {
-    if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
-      VerifyObjectImpl(o);
-    }
-  }
-  // Check that c.getClass() == c.getClass().getClass().
-  bool VerifyClassClass(const mirror::Class* c) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
+  // proper lock ordering for it.
+  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
 
   // Check sanity of all live references.
   void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
@@ -347,21 +334,20 @@
 
   // Enable verification of object references when the runtime is sufficiently initialized.
   void EnableObjectValidation() {
-    verify_object_mode_ = kDesiredHeapVerification;
-    if (verify_object_mode_ > kNoHeapVerification) {
+    verify_object_mode_ = kVerifyObjectSupport;
+    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
       VerifyHeap();
     }
   }
 
   // Disable object reference verification for image writing.
   void DisableObjectValidation() {
-    verify_object_mode_ = kHeapVerificationNotPermitted;
+    verify_object_mode_ = kVerifyObjectModeDisabled;
   }
 
   // Other checks may be performed if we know the heap should be in a sane state.
   bool IsObjectValidationEnabled() const {
-    return kDesiredHeapVerification > kNoHeapVerification &&
-        verify_object_mode_ > kHeapVerificationNotPermitted;
+    return verify_object_mode_ > kVerifyObjectModeDisabled;
   }
 
   // Returns true if low memory mode is enabled.
@@ -665,10 +651,6 @@
       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
   void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
-  // No thread saftey analysis since we call this everywhere and it is impossible to find a proper
-  // lock ordering for it.
-  void VerifyObjectBody(mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
-
   static void VerificationCallback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_);
 
@@ -916,7 +898,7 @@
   AtomicInteger total_allocation_time_;
 
   // The current state of heap verification, may be enabled or disabled.
-  HeapVerificationMode verify_object_mode_;
+  VerifyObjectMode verify_object_mode_;
 
   // Compacting GC disable count, prevents compacting GC from running iff > 0.
   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index f90e6c7..ee31112 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -111,7 +111,8 @@
 }
 
 void MallocSpace::RegisterRecentFree(mirror::Object* ptr) {
-  recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
+  // No verification since the object is dead.
+  recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass<kVerifyNone>());
   recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
 }
 
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 2377423..72e84f6 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -60,7 +60,8 @@
     // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
     void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
     // obj is a valid object. Use its class in the header to get the size.
-    size_t size = obj->SizeOf();
+    // Don't use verification since the object may be dead if we are sweeping.
+    size_t size = obj->SizeOf<kVerifyNone>();
     size_t size_by_size = rosalloc_->UsableSize(size);
     if (kIsDebugBuild) {
       size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr);
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 6d07a60..0b9f7ad 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -38,18 +38,20 @@
     Runtime::Current()->GetHeap()->RevokeAllThreadLocalBuffers();
     Runtime::Current()->GetHeap()->AddSpace(space);
   }
-  void InstallClass(mirror::Object* o, size_t size) NO_THREAD_SAFETY_ANALYSIS {
+  void InstallClass(SirtRef<mirror::Object>& o, size_t size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Note the minimum size, which is the size of a zero-length byte array.
     EXPECT_GE(size, SizeOfZeroLengthByteArray());
-    SirtRef<mirror::ClassLoader> null_loader(Thread::Current(), NULL);
-    mirror::Class* byte_array_class = Runtime::Current()->GetClassLinker()->FindClass("[B", null_loader);
-    EXPECT_TRUE(byte_array_class != NULL);
+    SirtRef<mirror::ClassLoader> null_loader(Thread::Current(), nullptr);
+    mirror::Class* byte_array_class = Runtime::Current()->GetClassLinker()->FindClass("[B",
+                                                                                      null_loader);
+    EXPECT_TRUE(byte_array_class != nullptr);
     o->SetClass(byte_array_class);
-    mirror::Array* arr = o->AsArray();
+    mirror::Array* arr = o->AsArray<kVerifyNone>();
     size_t header_size = SizeOfZeroLengthByteArray();
     int32_t length = size - header_size;
     arr->SetLength(length);
-    EXPECT_EQ(arr->SizeOf(), size);
+    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
   }
 
   static size_t SizeOfZeroLengthByteArray() {
@@ -86,38 +88,38 @@
 void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
   {
     // Init < max == growth
-    UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, NULL));
-    EXPECT_TRUE(space.get() != NULL);
+    UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+    EXPECT_TRUE(space.get() != nullptr);
   }
   {
     // Init == max == growth
-    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, NULL));
-    EXPECT_TRUE(space.get() != NULL);
+    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+    EXPECT_TRUE(space.get() != nullptr);
   }
   {
     // Init > max == growth
-    UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, NULL));
-    EXPECT_TRUE(space.get() == NULL);
+    UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+    EXPECT_TRUE(space.get() == nullptr);
   }
   {
     // Growth == init < max
-    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, NULL));
-    EXPECT_TRUE(space.get() != NULL);
+    UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+    EXPECT_TRUE(space.get() != nullptr);
   }
   {
     // Growth < init < max
-    UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, NULL));
-    EXPECT_TRUE(space.get() == NULL);
+    UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+    EXPECT_TRUE(space.get() == nullptr);
   }
   {
     // Init < growth < max
-    UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, NULL));
-    EXPECT_TRUE(space.get() != NULL);
+    UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+    EXPECT_TRUE(space.get() != nullptr);
   }
   {
     // Init < max < growth
-    UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, NULL));
-    EXPECT_TRUE(space.get() == NULL);
+    UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+    EXPECT_TRUE(space.get() == nullptr);
   }
 }
 
@@ -134,52 +136,52 @@
 // the GC works with the ZygoteSpace.
 void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
   size_t dummy = 0;
-  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
-  ASSERT_TRUE(space != NULL);
+  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
   AddSpace(space);
   Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
 
   // Succeeds, fits without adjusting the footprint limit.
-  mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
-  EXPECT_TRUE(ptr1 != NULL);
+  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+  EXPECT_TRUE(ptr1.get() != nullptr);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
   mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr2 == NULL);
+  EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated;
-  mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
-  EXPECT_TRUE(ptr3 != NULL);
+  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+  EXPECT_TRUE(ptr3.get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
   InstallClass(ptr3, 8 * MB);
 
   // Fails, requires a higher footprint limit.
   mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr4 == NULL);
+  EXPECT_TRUE(ptr4 == nullptr);
 
   // Also fails, requires a higher allowed footprint.
   mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr5 == NULL);
+  EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  ScopedObjectAccess soa(self);
-  size_t free3 = space->AllocationSize(ptr3);
+  size_t free3 = space->AllocationSize(ptr3.get());
   EXPECT_EQ(free3, ptr3_bytes_allocated);
-  EXPECT_EQ(free3, space->Free(self, ptr3));
+  EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
-  mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
-  EXPECT_TRUE(ptr6 != NULL);
+  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+  EXPECT_TRUE(ptr6.get() != nullptr);
   InstallClass(ptr6, 9 * MB);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1);
-  space->Free(self, ptr1);
+  size_t free1 = space->AllocationSize(ptr1.get());
+  space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 
   // Make sure that the zygote space isn't directly at the start of the space.
@@ -199,23 +201,23 @@
   AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  ptr1 = space->Alloc(self, 1 * MB, &dummy);
-  EXPECT_TRUE(ptr1 != NULL);
+  ptr1.reset(space->Alloc(self, 1 * MB, &dummy));
+  EXPECT_TRUE(ptr1.get() != nullptr);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
   ptr2 = space->Alloc(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr2 == NULL);
+  EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
-  ptr3 = space->AllocWithGrowth(self, 2 * MB, &dummy);
-  EXPECT_TRUE(ptr3 != NULL);
+  ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &dummy));
+  EXPECT_TRUE(ptr3.get() != nullptr);
   InstallClass(ptr3, 2 * MB);
-  space->Free(self, ptr3);
+  space->Free(self, ptr3.reset(nullptr));
 
   // Final clean up.
-  free1 = space->AllocationSize(ptr1);
-  space->Free(self, ptr1);
+  free1 = space->AllocationSize(ptr1.get());
+  space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
 
@@ -229,52 +231,52 @@
 
 void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
   size_t dummy = 0;
-  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
-  ASSERT_TRUE(space != NULL);
+  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  ASSERT_TRUE(space != nullptr);
   Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
   AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
-  EXPECT_TRUE(ptr1 != NULL);
+  SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+  EXPECT_TRUE(ptr1.get() != nullptr);
   InstallClass(ptr1, 1 * MB);
 
   // Fails, requires a higher footprint limit.
   mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr2 == NULL);
+  EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated;
-  mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
-  EXPECT_TRUE(ptr3 != NULL);
+  SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+  EXPECT_TRUE(ptr3.get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
   InstallClass(ptr3, 8 * MB);
 
   // Fails, requires a higher footprint limit.
   mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr4 == NULL);
+  EXPECT_TRUE(ptr4 == nullptr);
 
   // Also fails, requires a higher allowed footprint.
   mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
-  EXPECT_TRUE(ptr5 == NULL);
+  EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  ScopedObjectAccess soa(self);
-  size_t free3 = space->AllocationSize(ptr3);
+  size_t free3 = space->AllocationSize(ptr3.get());
   EXPECT_EQ(free3, ptr3_bytes_allocated);
-  space->Free(self, ptr3);
+  space->Free(self, ptr3.reset(nullptr));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
-  mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
-  EXPECT_TRUE(ptr6 != NULL);
+  SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+  EXPECT_TRUE(ptr6.get() != nullptr);
   InstallClass(ptr6, 9 * MB);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1);
-  space->Free(self, ptr1);
+  size_t free1 = space->AllocationSize(ptr1.get());
+  space->Free(self, ptr1.reset(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
 
@@ -288,11 +290,11 @@
 TEST_F(SpaceTest, LargeObjectTest) {
   size_t rand_seed = 0;
   for (size_t i = 0; i < 2; ++i) {
-    LargeObjectSpace* los = NULL;
+    LargeObjectSpace* los = nullptr;
     if (i == 0) {
       los = space::LargeObjectMapSpace::Create("large object space");
     } else {
-      los = space::FreeListSpace::Create("large object space", NULL, 128 * MB);
+      los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
     }
 
     static const size_t num_allocations = 64;
@@ -304,7 +306,7 @@
         size_t request_size = test_rand(&rand_seed) % max_allocation_size;
         size_t allocation_size = 0;
         mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size);
-        ASSERT_TRUE(obj != NULL);
+        ASSERT_TRUE(obj != nullptr);
         ASSERT_EQ(allocation_size, los->AllocationSize(obj));
         ASSERT_GE(allocation_size, request_size);
         // Fill in our magic value.
@@ -337,7 +339,7 @@
     size_t bytes_allocated = 0;
     // Checks that the coalescing works.
     mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated);
-    EXPECT_TRUE(obj != NULL);
+    EXPECT_TRUE(obj != nullptr);
     los->Free(Thread::Current(), obj);
 
     EXPECT_EQ(0U, los->GetBytesAllocated());
@@ -347,12 +349,13 @@
 }
 
 void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
-  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
-  ASSERT_TRUE(space != NULL);
+  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+  ASSERT_TRUE(space != nullptr);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
   AddSpace(space);
   Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
 
   // Succeeds, fits without adjusting the max allowed footprint.
   mirror::Object* lots_of_objects[1024];
@@ -361,17 +364,16 @@
     size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
     lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
-    InstallClass(lots_of_objects[i], size_of_zero_length_byte_array);
+    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
+    InstallClass(obj, size_of_zero_length_byte_array);
+    lots_of_objects[i] = obj.get();
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
   }
 
-  // Release memory and check pointers are NULL.
-  {
-    ScopedObjectAccess soa(self);
-    space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
-    for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-      EXPECT_TRUE(lots_of_objects[i] == nullptr);
-    }
+  // Release memory and check pointers are nullptr.
+  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == nullptr);
   }
 
   // Succeeds, fits by adjusting the max allowed footprint.
@@ -379,17 +381,17 @@
     size_t allocation_size = 0;
     lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
-    InstallClass(lots_of_objects[i], 1024);
+    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
+    InstallClass(obj, 1024);
+    lots_of_objects[i] = obj.get();
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
   }
 
-  // Release memory and check pointers are NULL
-  {
-    ScopedObjectAccess soa(self);
-    space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
-    for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-      EXPECT_TRUE(lots_of_objects[i] == nullptr);
-    }
+  // Release memory and check pointers are nullptr
+  // TODO: This isn't compaction safe, fix.
+  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == nullptr);
   }
 }
 
@@ -430,6 +432,7 @@
   size_t last_object = 0;  // last object for which allocation succeeded
   size_t amount_allocated = 0;  // amount of space allocated
   Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
   size_t rand_seed = 123456789;
   for (size_t i = 0; i < max_objects; i++) {
     size_t alloc_fails = 0;  // number of failed allocations
@@ -446,19 +449,19 @@
           alloc_size = size_of_zero_length_byte_array;
         }
       }
-      mirror::Object* object;
+      SirtRef<mirror::Object> object(self, nullptr);
       size_t bytes_allocated = 0;
       if (round <= 1) {
-        object = space->Alloc(self, alloc_size, &bytes_allocated);
+        object.reset(space->Alloc(self, alloc_size, &bytes_allocated));
       } else {
-        object = space->AllocWithGrowth(self, alloc_size, &bytes_allocated);
+        object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated));
       }
       footprint = space->GetFootprint();
       EXPECT_GE(space->Size(), footprint);  // invariant
-      if (object != NULL) {  // allocation succeeded
+      if (object.get() != nullptr) {  // allocation succeeded
         InstallClass(object, alloc_size);
-        lots_of_objects.get()[i] = object;
-        size_t allocation_size = space->AllocationSize(object);
+        lots_of_objects[i] = object.get();
+        size_t allocation_size = space->AllocationSize(object.get());
         EXPECT_EQ(bytes_allocated, allocation_size);
         if (object_size > 0) {
           EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -489,8 +492,11 @@
   // Release storage in a semi-adhoc manner
   size_t free_increment = 96;
   while (true) {
-    // Give the space a haircut
-    space->Trim();
+    {
+      ScopedThreadStateChange tsc(self, kNative);
+      // Give the space a haircut.
+      space->Trim();
+    }
 
     // Bounds sanity
     footprint = space->GetFootprint();
@@ -504,30 +510,28 @@
       break;
     }
 
-    {
-      // Free some objects
-      ScopedObjectAccess soa(self);
-      for (size_t i = 0; i < last_object; i += free_increment) {
-        mirror::Object* object = lots_of_objects.get()[i];
-        if (object == NULL) {
-          continue;
-        }
-        size_t allocation_size = space->AllocationSize(object);
-        if (object_size > 0) {
-          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
-        } else {
-          EXPECT_GE(allocation_size, 8u);
-        }
-        space->Free(self, object);
-        lots_of_objects.get()[i] = NULL;
-        amount_allocated -= allocation_size;
-        footprint = space->GetFootprint();
-        EXPECT_GE(space->Size(), footprint);  // invariant
+    // Free some objects
+    for (size_t i = 0; i < last_object; i += free_increment) {
+      mirror::Object* object = lots_of_objects.get()[i];
+      if (object == nullptr) {
+        continue;
       }
-
-      free_increment >>= 1;
+      size_t allocation_size = space->AllocationSize(object);
+      if (object_size > 0) {
+        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+      } else {
+        EXPECT_GE(allocation_size, 8u);
+      }
+      space->Free(self, object);
+      lots_of_objects.get()[i] = nullptr;
+      amount_allocated -= allocation_size;
+      footprint = space->GetFootprint();
+      EXPECT_GE(space->Size(), footprint);  // invariant
     }
+
+    free_increment >>= 1;
   }
+
   // The space has become empty here before allocating a large object
   // below. For RosAlloc, revoke thread-local runs, which are kept
   // even when empty for a performance reason, so that they won't
@@ -537,15 +541,15 @@
   space->RevokeAllThreadLocalBuffers();
 
   // All memory was released, try a large allocation to check freed memory is being coalesced
-  mirror::Object* large_object;
+  SirtRef<mirror::Object> large_object(self, nullptr);
   size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
   size_t bytes_allocated = 0;
   if (round <= 1) {
-    large_object = space->Alloc(self, three_quarters_space, &bytes_allocated);
+    large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated));
   } else {
-    large_object = space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated);
+    large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated));
   }
-  EXPECT_TRUE(large_object != NULL);
+  EXPECT_TRUE(large_object.get() != nullptr);
   InstallClass(large_object, three_quarters_space);
 
   // Sanity check footprint
@@ -555,10 +559,8 @@
   EXPECT_LE(space->Size(), growth_limit);
 
   // Clean up
-  {
-    ScopedObjectAccess soa(self);
-    space->Free(self, large_object);
-  }
+  space->Free(self, large_object.reset(nullptr));
+
   // Sanity check footprint
   footprint = space->GetFootprint();
   EXPECT_LE(footprint, growth_limit);
@@ -574,8 +576,8 @@
   size_t initial_size = 4 * MB;
   size_t growth_limit = 8 * MB;
   size_t capacity = 16 * MB;
-  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, NULL));
-  ASSERT_TRUE(space != NULL);
+  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+  ASSERT_TRUE(space != nullptr);
 
   // Basic sanity
   EXPECT_EQ(space->Capacity(), growth_limit);