Lazily compute object identity hash codes.

Before, we computed an identity hash code whenever we inflated a
monitor. As a result the image was full of objects whose lock words
held hash codes, which caused locks on those objects to inflate
excessively during application run time.

This change computes hash codes lazily instead. When a thin lock is
inflated, the monitor is given a hash code of 0, which signifies that
no hash code has been computed yet. When we later ask for the identity
hash code of an object with an inflated monitor, the hash code is
generated and stored if it is still 0.
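
A minimal, self-contained sketch of the lazy scheme (not ART code:
std::atomic stands in for ART's AtomicInteger, and GenerateNonZeroHash
and GetHashCodeLazily below are placeholder names; the real generator
is mirror::Object::GenerateIdentityHashCode(), which never returns 0):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    std::atomic<int32_t> hash_code{0};  // 0 == no hash code computed yet.

    int32_t GenerateNonZeroHash() {
      // Placeholder LCG; the real generator also masks the result with
      // LockWord::kHashMask and retries until it is non-zero.
      static std::atomic<uint32_t> seed{987654321u};
      uint32_t h;
      do {
        h = seed.fetch_add(12345u) * 1103515245u;
      } while (static_cast<int32_t>(h) == 0);
      return static_cast<int32_t>(h);
    }

    int32_t GetHashCodeLazily() {
      int32_t hc = hash_code.load();
      while (hc == 0) {
        // The first caller installs a freshly generated hash code; racing
        // callers either win the CAS or pick up the winner's value when
        // compare_exchange_weak reloads hc on failure.
        if (hash_code.compare_exchange_weak(hc, GenerateNonZeroHash())) {
          return hash_code.load();
        }
      }
      return hc;
    }

    int main() {
      std::printf("%d\n", GetHashCodeLazily());
      std::printf("%d\n", GetHashCodeLazily());  // Same value both times.
      return 0;
    }

Because 0 is reserved to mean "no hash code", the generator must never
hand out 0; that is why GenerateIdentityHashCode retries when the
masked value is 0, and why the image writer can CHECK_NE the stored
hash code against 0.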

Change-Id: Iae6acd1960515a36e74644e5b1323ff336731806
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index af60a38..75be2c9 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -504,7 +504,11 @@
       Monitor* monitor = lw.FatLockMonitor();
       CHECK(monitor != nullptr);
       CHECK(!monitor->IsLocked());
-      copy->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+      if (monitor->HasHashCode()) {
+        copy->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+      } else {
+        copy->SetLockWord(LockWord());
+      }
       break;
     }
     case LockWord::kThinLocked: {
@@ -512,9 +516,10 @@
       break;
     }
     case LockWord::kUnlocked:
-      // Fall-through.
+      break;
     case LockWord::kHashCode:
       // Do nothing since we can just keep the same hash code.
+      CHECK_NE(lw.GetHashCode(), 0);
       break;
     default:
       LOG(FATAL) << "Unreachable.";
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 50a5176..9a853d0 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -325,7 +325,7 @@
 ENTRY art_quick_lock_object
     cbz    r0, slow_lock
 retry_lock:
-    ldrt   r2, [r9, #THREAD_ID_OFFSET]
+    ldr    r2, [r9, #THREAD_ID_OFFSET]
     ldrex  r1, [r0, #LOCK_WORD_OFFSET]
     cbnz   r1, not_unlocked           @ already thin locked
     @ unlocked case - r2 holds thread id with count of 0
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index 59947f5..efd3d9d 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -45,7 +45,7 @@
   DCHECK_EQ(FatLockMonitor(), mon);
 }
 
-inline uint32_t LockWord::GetHashCode() const {
+inline int32_t LockWord::GetHashCode() const {
   DCHECK_EQ(GetState(), kHashCode);
   return (value_ >> kHashShift) & kHashMask;
 }
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 9b6c64a..1882ae6 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -132,7 +132,7 @@
   }
 
   // Return the hash code stored in the lock word, must be kHashCode state.
-  uint32_t GetHashCode() const;
+  int32_t GetHashCode() const;
 
   uint32_t GetValue() const {
     return value_;
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 49bad4c..bd187c1 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -84,14 +84,14 @@
   return copy.get();
 }
 
-uint32_t Object::GenerateIdentityHashCode() {
+int32_t Object::GenerateIdentityHashCode() {
   static AtomicInteger seed(987654321 + std::time(nullptr));
-  uint32_t expected_value, new_value;
+  int32_t expected_value, new_value;
   do {
     expected_value = static_cast<uint32_t>(seed.load());
     new_value = expected_value * 1103515245 + 12345;
-  } while (!seed.compare_and_swap(static_cast<int32_t>(expected_value),
-                                  static_cast<int32_t>(new_value)));
+  } while ((expected_value & LockWord::kHashMask) == 0 ||
+      !seed.compare_and_swap(expected_value, new_value));
   return expected_value & LockWord::kHashMask;
 }
 
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 11473cd..e8ea3f2 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -249,7 +249,7 @@
   }
 
   // Generate an identity hash code.
-  static uint32_t GenerateIdentityHashCode();
+  static int32_t GenerateIdentityHashCode();
 
   // Write barrier called post update to a reference bearing field.
   static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index aa47bda..2abfd3d 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -79,7 +79,7 @@
   is_sensitive_thread_hook_ = is_sensitive_thread_hook;
 }
 
-Monitor::Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
+Monitor::Monitor(Thread* owner, mirror::Object* obj, int32_t hash_code)
     : monitor_lock_("a monitor lock", kMonitorLock),
       monitor_contenders_("monitor contenders", monitor_lock_),
       owner_(owner),
@@ -95,6 +95,16 @@
   // The identity hash code is set for the life time of the monitor.
 }
 
+int32_t Monitor::GetHashCode() {
+  while (!HasHashCode()) {
+    if (hash_code_.compare_and_swap(0, mirror::Object::GenerateIdentityHashCode())) {
+      break;
+    }
+  }
+  DCHECK(HasHashCode());
+  return hash_code_.load();
+}
+
 bool Monitor::Install(Thread* self) {
   MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
   CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
@@ -107,7 +117,7 @@
       break;
     }
     case LockWord::kHashCode: {
-      CHECK_EQ(hash_code_, lw.GetHashCode());
+      CHECK_EQ(hash_code_, static_cast<int32_t>(lw.GetHashCode()));
       break;
     }
     case LockWord::kFatLocked: {
@@ -622,7 +632,7 @@
             return;  // Success!
           } else {
             // We'd overflow the recursion count, so inflate the monitor.
-            InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
+            InflateThinLocked(self, obj, lock_word, 0);
           }
         } else {
           // Contention.
@@ -632,7 +642,7 @@
             NanoSleep(1000);  // Sleep for 1us and re-attempt.
           } else {
             contention_count = 0;
-            InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
+            InflateThinLocked(self, obj, lock_word, 0);
           }
         }
         continue;  // Start from the beginning.
@@ -716,7 +726,7 @@
         return;  // Failure.
       } else {
         // We own the lock, inflate to enqueue ourself on the Monitor.
-        Inflate(self, self, obj, mirror::Object::GenerateIdentityHashCode());
+        Inflate(self, self, obj, 0);
         lock_word = obj->GetLockWord();
       }
       break;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index c464400..09cfafa 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -24,6 +24,7 @@
 #include <list>
 #include <vector>
 
+#include "atomic_integer.h"
 #include "base/mutex.h"
 #include "root_visitor.h"
 #include "thread_state.h"
@@ -98,17 +99,19 @@
     return owner_;
   }
 
-  int32_t GetHashCode() const {
-    return hash_code_;
-  }
+  int32_t GetHashCode();
 
   bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  bool HasHashCode() const {
+    return hash_code_.load() != 0;
+  }
+
   static void InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
                                 uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
 
  private:
-  explicit Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
+  explicit Monitor(Thread* owner, mirror::Object* obj, int32_t hash_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Install the monitor into its object, may fail if another thread installs a different monitor
@@ -179,8 +182,8 @@
   // Threads currently waiting on this monitor.
   Thread* wait_set_ GUARDED_BY(monitor_lock_);
 
-  // Stored object hash code, always generated.
-  const uint32_t hash_code_;
+  // Stored object hash code, generated lazily by GetHashCode.
+  AtomicInteger hash_code_;
 
   // Method and dex pc where the lock owner acquired the lock, used when lock
   // sampling is enabled. locking_method_ may be null if the lock is currently