Compacting collector.

The compacting collector is currently similar to semispace. It works by
copying objects back and forth between two bump pointer spaces. There
are types of objects which are "non-movable" due to current runtime
limitations. These are Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space which have
no lock in the allocation code path. When you allocate from these it uses
atomic operations to increase an index. Traversing the objects in the bump
pointer space relies on Object::SizeOf matching the allocated size exactly.

Runtime changes:
JNI::GetArrayElements now returns copies of objects if you attempt to get
the backing data of a movable array. For GetArrayElementsCritical, we return
direct backing storage for any types of arrays, but temporarily disable
the GC until the critical region is completed.

Added a new runtime call called VisitObjects; it is used in place of
the old pattern, which was flushing the allocation stack and walking
the bitmaps.

Changed image writer to be compaction safe and use object monitor word
for forwarding addresses.

Added a bunch of SIRTs to ClassLinker, MethodLinker, etc..

TODO: Enable switching allocators, compacting on background, etc..

Bug: 8981901

Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 2abfd3d..7fada9e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -128,6 +128,10 @@
       LOG(FATAL) << "Inflating unlocked lock word";
       break;
     }
+    default: {
+      LOG(FATAL) << "Invalid monitor state " << lw.GetState();
+      return false;
+    }
   }
   LockWord fat(this);
   // Publish the updated lock word, which may race with other threads.
@@ -140,8 +144,7 @@
 }
 
 Monitor::~Monitor() {
-  CHECK(obj_ != NULL);
-  CHECK_EQ(obj_->GetLockWord().GetState(), LockWord::kFatLocked);
+  // Deflated monitors have a null object.
 }
 
 /*
@@ -559,6 +562,43 @@
   }
 }
 
+bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
+  DCHECK(obj != nullptr);
+  LockWord lw(obj->GetLockWord());
+  // If the lock isn't an inflated monitor, then we don't need to deflate anything.
+  if (lw.GetState() == LockWord::kFatLocked) {
+    Monitor* monitor = lw.FatLockMonitor();
+    CHECK(monitor != nullptr);
+    MutexLock mu(self, monitor->monitor_lock_);
+    Thread* owner = monitor->owner_;
+    if (owner != nullptr) {
+      // Can't deflate if we are locked and have a hash code.
+      if (monitor->HasHashCode()) {
+        return false;
+      }
+      // Can't deflate if our lock count is too high.
+      if (monitor->lock_count_ > LockWord::kThinLockMaxCount) {
+        return false;
+      }
+      // Can't deflate if we have anybody waiting on the CV.
+      if (monitor->monitor_contenders_.GetNumWaiters() > 0) {
+        return false;
+      }
+      // Deflate to a thin lock.
+      obj->SetLockWord(LockWord::FromThinLockId(owner->GetTid(), monitor->lock_count_));
+    } else if (monitor->HasHashCode()) {
+      obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+    } else {
+      // No lock and no hash, just put an empty lock word inside the object.
+      obj->SetLockWord(LockWord());
+    }
+    // The monitor is deflated, mark the object as nullptr so that we know to delete it during the
+    // next GC.
+    monitor->obj_ = nullptr;
+  }
+  return true;
+}
+
 /*
  * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The calling
  * thread must own the lock or the owner must be suspended. There's a race with other threads
@@ -577,13 +617,13 @@
   }
 }
 
-void Monitor::InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
                                 uint32_t hash_code) {
   DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
   uint32_t owner_thread_id = lock_word.ThinLockOwner();
   if (owner_thread_id == self->GetThreadId()) {
     // We own the monitor, we can easily inflate it.
-    Inflate(self, self, obj, hash_code);
+    Inflate(self, self, obj.get(), hash_code);
   } else {
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
@@ -598,7 +638,7 @@
         if (lock_word.GetState() == LockWord::kThinLocked &&
             lock_word.ThinLockOwner() == owner_thread_id) {
           // Go ahead and inflate the lock.
-          Inflate(self, owner, obj, hash_code);
+          Inflate(self, owner, obj.get(), hash_code);
         }
         thread_list->Resume(owner, false);
       }
@@ -611,12 +651,13 @@
   DCHECK(obj != NULL);
   uint32_t thread_id = self->GetThreadId();
   size_t contention_count = 0;
+  SirtRef<mirror::Object> sirt_obj(self, obj);
   while (true) {
-    LockWord lock_word = obj->GetLockWord();
+    LockWord lock_word = sirt_obj->GetLockWord();
     switch (lock_word.GetState()) {
       case LockWord::kUnlocked: {
         LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0));
-        if (obj->CasLockWord(lock_word, thin_locked)) {
+        if (sirt_obj->CasLockWord(lock_word, thin_locked)) {
           return;  // Success!
         }
         continue;  // Go again.
@@ -628,11 +669,11 @@
           uint32_t new_count = lock_word.ThinLockCount() + 1;
           if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
             LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
-            obj->SetLockWord(thin_locked);
+            sirt_obj->SetLockWord(thin_locked);
             return;  // Success!
           } else {
             // We'd overflow the recursion count, so inflate the monitor.
-            InflateThinLocked(self, obj, lock_word, 0);
+            InflateThinLocked(self, sirt_obj, lock_word, 0);
           }
         } else {
           // Contention.
@@ -642,7 +683,7 @@
             NanoSleep(1000);  // Sleep for 1us and re-attempt.
           } else {
             contention_count = 0;
-            InflateThinLocked(self, obj, lock_word, 0);
+            InflateThinLocked(self, sirt_obj, lock_word, 0);
           }
         }
         continue;  // Start from the beginning.
@@ -654,9 +695,13 @@
       }
       case LockWord::kHashCode: {
         // Inflate with the existing hashcode.
-        Inflate(self, nullptr, obj, lock_word.GetHashCode());
+        Inflate(self, nullptr, sirt_obj.get(), lock_word.GetHashCode());
         break;
       }
+      default: {
+        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+        return;
+      }
     }
   }
 }
@@ -666,11 +711,12 @@
   DCHECK(obj != NULL);
 
   LockWord lock_word = obj->GetLockWord();
+  SirtRef<mirror::Object> sirt_obj(self, obj);
   switch (lock_word.GetState()) {
     case LockWord::kHashCode:
       // Fall-through.
     case LockWord::kUnlocked:
-      FailedUnlock(obj, self, NULL, NULL);
+      FailedUnlock(sirt_obj.get(), self, NULL, NULL);
       return false;  // Failure.
     case LockWord::kThinLocked: {
       uint32_t thread_id = self->GetThreadId();
@@ -679,16 +725,16 @@
         // TODO: there's a race here with the owner dying while we unlock.
         Thread* owner =
             Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
-        FailedUnlock(obj, self, owner, NULL);
+        FailedUnlock(sirt_obj.get(), self, owner, NULL);
         return false;  // Failure.
       } else {
         // We own the lock, decrease the recursion count.
         if (lock_word.ThinLockCount() != 0) {
           uint32_t new_count = lock_word.ThinLockCount() - 1;
           LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
-          obj->SetLockWord(thin_locked);
+          sirt_obj->SetLockWord(thin_locked);
         } else {
-          obj->SetLockWord(LockWord());
+          sirt_obj->SetLockWord(LockWord());
         }
         return true;  // Success!
       }
@@ -697,9 +743,10 @@
       Monitor* mon = lock_word.FatLockMonitor();
       return mon->Unlock(self);
     }
-    default:
-      LOG(FATAL) << "Unreachable";
+    default: {
+      LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
       return false;
+    }
   }
 }
 
@@ -733,6 +780,10 @@
     }
     case LockWord::kFatLocked:
       break;  // Already set for a wait.
+    default: {
+      LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+      return;
+    }
   }
   Monitor* mon = lock_word.FatLockMonitor();
   mon->Wait(self, ms, ns, interruptShouldThrow, why);
@@ -769,6 +820,10 @@
       }
       return;  // Success.
     }
+    default: {
+      LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+      return;
+    }
   }
 }
 
@@ -787,9 +842,10 @@
       Monitor* mon = lock_word.FatLockMonitor();
       return mon->GetOwnerThreadId();
     }
-    default:
+    default: {
       LOG(FATAL) << "Unreachable";
       return ThreadList::kInvalidThreadId;
+    }
   }
 }
 
@@ -1011,7 +1067,8 @@
   for (auto it = list_.begin(); it != list_.end(); ) {
     Monitor* m = *it;
     mirror::Object* obj = m->GetObject();
-    mirror::Object* new_obj = visitor(obj, arg);
+    // The object of a monitor can be null if we have deflated it.
+    mirror::Object* new_obj = obj != nullptr ? visitor(obj, arg) : nullptr;
     if (new_obj == nullptr) {
       VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
                     << m->GetObject();
@@ -1031,6 +1088,8 @@
   switch (lock_word.GetState()) {
     case LockWord::kUnlocked:
       // Fall-through.
+    case LockWord::kForwardingAddress:
+      // Fall-through.
     case LockWord::kHashCode:
       break;
     case LockWord::kThinLocked: