Compacting collector.

The compacting collector is currently similar to semispace: it works by
copying objects back and forth between two bump pointer spaces. Some types
of objects are "non-movable" due to current runtime limitations; these are
Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space which have
no lock in the allocation code path; allocation uses atomic operations to
bump an index. Traversing the objects in a bump pointer space relies on
Object::SizeOf matching the allocated size exactly.
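
As a rough sketch only (hypothetical names, not the actual BumpPointerSpace
API), the lock-free allocation path amounts to a compare-and-swap loop on the
space's end pointer:

  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  // Hypothetical stand-in for a bump pointer space's allocation fast path.
  class BumpRegion {
   public:
    BumpRegion(uint8_t* begin, uint8_t* limit) : end_(begin), limit_(limit) {}

    // Returns nullptr when the region is exhausted; no lock is taken.
    void* Alloc(size_t num_bytes) {
      size_t aligned = (num_bytes + 7) & ~static_cast<size_t>(7);  // 8-byte alignment
      uint8_t* old_end = end_.load(std::memory_order_relaxed);
      do {
        if (old_end + aligned > limit_) {
          return nullptr;  // out of space, caller would request a GC
        }
        // compare_exchange_weak reloads old_end on failure, so we simply retry.
      } while (!end_.compare_exchange_weak(old_end, old_end + aligned,
                                           std::memory_order_relaxed));
      return old_end;
    }

   private:
    std::atomic<uint8_t*> end_;  // next free byte, bumped atomically
    uint8_t* const limit_;       // end of the mapped region
  };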

Runtime changes:
JNI::GetArrayElements returns a copy if you attempt to get the backing
data of a movable array. For GetPrimitiveArrayCritical, we return direct
backing storage for all array types, but temporarily disable the GC until
the critical region is completed.
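
From a caller's point of view (an illustrative native method, not code from
this change), the two access paths look like this:

  #include <jni.h>

  void UseArray(JNIEnv* env, jintArray array) {
    jboolean is_copy;
    // May hand back a copy when the array is movable, as described above.
    jint* elements = env->GetIntArrayElements(array, &is_copy);
    // ... use elements ...
    env->ReleaseIntArrayElements(array, elements, 0);  // writes back and frees a copy

    // Critical access: direct backing storage, with the GC held off until release.
    void* data = env->GetPrimitiveArrayCritical(array, &is_copy);
    // ... short, non-blocking use of data; no other JNI calls in here ...
    env->ReleasePrimitiveArrayCritical(array, data, 0);
  }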

Added a new runtime call, VisitObjects, which is used in place of the old
pattern of flushing the allocation stack and walking the bitmaps.
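
A hypothetical sketch of the kind of walk this enables (the real VisitObjects
signature may differ); it also shows why Object::SizeOf must match the
allocated size exactly:

  #include <cstddef>
  #include <cstdint>

  // Stand-in for mirror::Object; the real SizeOf is derived from the object's class.
  struct Object {
    size_t SizeOf() const { return size_; }
    size_t size_;
  };

  using ObjectCallback = void (*)(Object* obj, void* arg);

  // Visit every object in a bump pointer region by stepping from one object to
  // the next using its exact allocated (alignment-rounded) size.
  void VisitRegionObjects(uint8_t* begin, uint8_t* end,
                          ObjectCallback callback, void* arg) {
    uint8_t* pos = begin;
    while (pos < end) {
      Object* obj = reinterpret_cast<Object*>(pos);
      callback(obj, arg);
      pos += (obj->SizeOf() + 7) & ~static_cast<size_t>(7);
    }
  }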

Changed the image writer to be compaction safe and to use the object
monitor word for forwarding addresses.
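
The idea, sketched with hypothetical names (not the real LockWord encoding):
while an object is being forwarded, the word that normally holds its lock
state or hash is overwritten with a tagged destination address:

  #include <cstdint>

  // Hypothetical illustration of stashing a forwarding address in the monitor word.
  struct MonitorWord {
    static constexpr uintptr_t kForwardingTag = 0x1;  // assumes object addresses are aligned

    uintptr_t word_;  // normally lock state or identity hash; reused during forwarding

    void SetForwardingAddress(uintptr_t new_address) { word_ = new_address | kForwardingTag; }
    bool IsForwarded() const { return (word_ & kForwardingTag) != 0; }
    uintptr_t GetForwardingAddress() const { return word_ & ~kForwardingTag; }
  };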

Added a number of SIRTs to ClassLinker, MethodLinker, etc.

TODO: Enable switching allocators, compacting in the background, etc.

Bug: 8981901

Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 87d02c9..385ef5f 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -39,38 +39,48 @@
 namespace art {
 namespace mirror {
 
-Object* Object::Clone(Thread* self) {
-  mirror::Class* c = GetClass();
-  DCHECK(!c->IsClassClass());
-  // Object::SizeOf gets the right size even if we're an array.
-  // Using c->AllocObject() here would be wrong.
-  size_t num_bytes = SizeOf();
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  SirtRef<mirror::Object> sirt_this(self, this);
-  Object* copy = heap->AllocObject(self, c, num_bytes);
-  if (UNLIKELY(copy == nullptr)) {
-    return nullptr;
-  }
+static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Copy instance data.  We assume memcpy copies by words.
   // TODO: expose and use move32.
-  byte* src_bytes = reinterpret_cast<byte*>(sirt_this.get());
-  byte* dst_bytes = reinterpret_cast<byte*>(copy);
+  byte* src_bytes = reinterpret_cast<byte*>(src);
+  byte* dst_bytes = reinterpret_cast<byte*>(dest);
   size_t offset = sizeof(Object);
   memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   // Perform write barriers on copied object references.
-  c = copy->GetClass();  // Re-read Class in case it moved.
+  Class* c = src->GetClass();
   if (c->IsArrayClass()) {
     if (!c->GetComponentType()->IsPrimitive()) {
-      const ObjectArray<Object>* array = copy->AsObjectArray<Object>();
-      heap->WriteBarrierArray(copy, 0, array->GetLength());
+      const ObjectArray<Object>* array = dest->AsObjectArray<Object>();
+      heap->WriteBarrierArray(dest, 0, array->GetLength());
     }
   } else {
-    heap->WriteBarrierEveryFieldOf(copy);
+    heap->WriteBarrierEveryFieldOf(dest);
   }
   if (c->IsFinalizable()) {
-    SirtRef<mirror::Object> sirt_copy(self, copy);
-    heap->AddFinalizerReference(self, copy);
-    return sirt_copy.get();
+    SirtRef<Object> sirt_dest(self, dest);
+    heap->AddFinalizerReference(self, dest);
+    return sirt_dest.get();
+  }
+  return dest;
+}
+
+Object* Object::Clone(Thread* self) {
+  CHECK(!IsClass()) << "Can't clone classes.";
+  // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
+  // be wrong.
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  size_t num_bytes = SizeOf();
+  SirtRef<Object> this_object(self, this);
+  Object* copy;
+  if (heap->IsMovableObject(this)) {
+    copy = heap->AllocObject(self, GetClass(), num_bytes);
+  } else {
+    copy = heap->AllocNonMovableObject(self, GetClass(), num_bytes);
+  }
+  if (LIKELY(copy != nullptr)) {
+    return CopyObject(self, copy, this_object.get(), num_bytes);
   }
   return copy;
 }
@@ -87,8 +97,9 @@
 }
 
 int32_t Object::IdentityHashCode() const {
+  mirror::Object* current_this = const_cast<mirror::Object*>(this);
   while (true) {
-    LockWord lw = GetLockWord();
+    LockWord lw = current_this->GetLockWord();
     switch (lw.GetState()) {
       case LockWord::kUnlocked: {
         // Try to compare and swap in a new hash, if we succeed we will return the hash on the next
@@ -103,7 +114,10 @@
       case LockWord::kThinLocked: {
         // Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
         Thread* self = Thread::Current();
-        Monitor::InflateThinLocked(self, const_cast<Object*>(this), lw, GenerateIdentityHashCode());
+        SirtRef<mirror::Object> sirt_this(self, current_this);
+        Monitor::InflateThinLocked(self, sirt_this, lw, GenerateIdentityHashCode());
+        // A GC may have occurred when we switched to kBlocked.
+        current_this = sirt_this.get();
         break;
       }
       case LockWord::kFatLocked: {
@@ -115,6 +129,10 @@
       case LockWord::kHashCode: {
         return lw.GetHashCode();
       }
+      default: {
+        LOG(FATAL) << "Invalid state during hashcode " << lw.GetState();
+        break;
+      }
     }
   }
   LOG(FATAL) << "Unreachable";