Object model changes to support 64bit.

Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object* so that functionality to compress larger
references can be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.

Expand fields in objects holding pointers so they can hold 64bit pointers. It's
expected the size of these will come down by improving where we hold compiler
meta-data.
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type.
Add missing annotalysis information on the mutator lock, and fix related
issues.
Refactor and share implementations for array copy between System and uses
elsewhere in the runtime.
Fix numerous 64bit build issues.

Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index bd81bd5..b2725e5 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,7 +27,7 @@
 namespace art {
 namespace mirror {
 
-inline size_t Array::SizeOf() const {
+inline size_t Array::SizeOf() {
   // This is safe from overflow because the array was already allocated, so we know it's sane.
   size_t component_size = GetClass()->GetComponentSize();
   int32_t component_count = GetLength();
@@ -64,9 +64,10 @@
   explicit SetLengthVisitor(int32_t length) : length_(length) {
   }
 
-  void operator()(mirror::Object* obj) const {
-    mirror::Array* array = obj->AsArray();
-    DCHECK(array->IsArrayInstance());
+  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
+    Array* array = down_cast<Array*>(obj);
+    // DCHECK(array->IsArrayInstance());
     array->SetLength(length_);
   }
 
@@ -116,6 +117,114 @@
   }
 }
 
+// Similar to memmove except elements are aligned appropriately for T, count is in T-sized units,
+// and copies are guaranteed not to tear when T is less than 64 bits.
+template<typename T>
+static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
+  d += count;
+  s += count;
+  for (int32_t i = 0; i < count; ++i) {
+    d--;
+    s--;
+    *d = *s;
+  }
+}
+
+template<class T>
+void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                                int32_t count) {
+  if (UNLIKELY(count == 0)) {
+    return;
+  }
+  DCHECK_GE(dst_pos, 0);
+  DCHECK_GE(src_pos, 0);
+  DCHECK_GT(count, 0);
+  DCHECK(src != nullptr);
+  DCHECK_LT(dst_pos, GetLength());
+  DCHECK_LE(dst_pos, GetLength() - count);
+  DCHECK_LT(src_pos, src->GetLength());
+  DCHECK_LE(src_pos, src->GetLength() - count);
+
+  // Note for non-byte copies we can't rely on standard libc functions like memcpy(3) and memmove(3)
+  // in our implementation, because they may copy byte-by-byte.
+  if (LIKELY(src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count)) {
+    // Forward copy ok.
+    Memcpy(dst_pos, src, src_pos, count);
+  } else {
+    // Backward copy necessary.
+    void* dst_raw = GetRawData(sizeof(T), dst_pos);
+    const void* src_raw = src->GetRawData(sizeof(T), src_pos);
+    if (sizeof(T) == sizeof(uint8_t)) {
+      // TUNING: use memmove here?
+      uint8_t* d = reinterpret_cast<uint8_t*>(dst_raw);
+      const uint8_t* s = reinterpret_cast<const uint8_t*>(src_raw);
+      ArrayBackwardCopy<uint8_t>(d, s, count);
+    } else if (sizeof(T) == sizeof(uint16_t)) {
+      uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
+      const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
+      ArrayBackwardCopy<uint16_t>(d, s, count);
+    } else if (sizeof(T) == sizeof(uint32_t)) {
+      uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
+      const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
+      ArrayBackwardCopy<uint32_t>(d, s, count);
+    } else {
+      DCHECK_EQ(sizeof(T), sizeof(uint64_t));
+      uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
+      const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
+      ArrayBackwardCopy<uint64_t>(d, s, count);
+    }
+  }
+}
+
+// Similar to memcpy except elements are aligned appropriately for T, count is in T-sized units,
+// and copies are guaranteed not to tear when T is less than 64 bits.
+template<typename T>
+static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
+  for (int32_t i = 0; i < count; ++i) {
+    *d = *s;
+    d++;
+    s++;
+  }
+}
+
+
+template<class T>
+void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                               int32_t count) {
+  if (UNLIKELY(count == 0)) {
+    return;
+  }
+  DCHECK_GE(dst_pos, 0);
+  DCHECK_GE(src_pos, 0);
+  DCHECK_GT(count, 0);
+  DCHECK(src != nullptr);
+  DCHECK_LT(dst_pos, GetLength());
+  DCHECK_LE(dst_pos, GetLength() - count);
+  DCHECK_LT(src_pos, src->GetLength());
+  DCHECK_LE(src_pos, src->GetLength() - count);
+
+  // Note for non-byte copies we can't rely on standard libc functions like memcpy(3) and memmove(3)
+  // in our implementation, because they may copy byte-by-byte.
+  void* dst_raw = GetRawData(sizeof(T), dst_pos);
+  const void* src_raw = src->GetRawData(sizeof(T), src_pos);
+  if (sizeof(T) == sizeof(uint8_t)) {
+    memcpy(dst_raw, src_raw, count);
+  } else if (sizeof(T) == sizeof(uint16_t)) {
+    uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
+    const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
+    ArrayForwardCopy<uint16_t>(d, s, count);
+  } else if (sizeof(T) == sizeof(uint32_t)) {
+    uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
+    const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
+    ArrayForwardCopy<uint32_t>(d, s, count);
+  } else {
+    DCHECK_EQ(sizeof(T), sizeof(uint64_t));
+    uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
+    const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
+    ArrayForwardCopy<uint64_t>(d, s, count);
+  }
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 00b88db..ca0d1f3 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -103,11 +103,11 @@
   return new_array;
 }
 
-void Array::ThrowArrayIndexOutOfBoundsException(int32_t index) const {
+void Array::ThrowArrayIndexOutOfBoundsException(int32_t index) {
   art::ThrowArrayIndexOutOfBoundsException(index, GetLength());
 }
 
-void Array::ThrowArrayStoreException(Object* object) const {
+void Array::ThrowArrayStoreException(Object* object) {
   art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
 }
 
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 207573f..6e366a0 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -50,15 +50,15 @@
   static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t SizeOf() const;
+  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t GetLength() const {
+  int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), false);
   }
 
-  void SetLength(int32_t length) {
+  void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     CHECK_GE(length, 0);
-    SetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false, false);
   }
 
   static MemberOffset LengthOffset() {
@@ -74,20 +74,22 @@
     }
   }
 
-  void* GetRawData(size_t component_size) {
-    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value();
+  void* GetRawData(size_t component_size, int32_t index)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+        + (index * component_size);
     return reinterpret_cast<void*>(data);
   }
 
-  const void* GetRawData(size_t component_size) const {
-    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value();
-    return reinterpret_cast<const void*>(data);
+  const void* GetRawData(size_t component_size, int32_t index) const {
+    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+        + (index * component_size);
+    return reinterpret_cast<void*>(data);
   }
 
   // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
   // returns false.
-  bool CheckIsValidIndex(int32_t index) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(GetLength()))) {
       ThrowArrayIndexOutOfBoundsException(index);
       return false;
@@ -96,11 +98,10 @@
   }
 
  protected:
-  void ThrowArrayStoreException(Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
-  void ThrowArrayIndexOutOfBoundsException(int32_t index) const
+  void ThrowArrayIndexOutOfBoundsException(int32_t index)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // The number of array elements.
@@ -119,17 +120,15 @@
   static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const T* GetData() const {
-    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(sizeof(T)).Int32Value();
-    return reinterpret_cast<T*>(data);
+  const T* GetData() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
   }
 
-  T* GetData() {
-    intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(sizeof(T)).Int32Value();
-    return reinterpret_cast<T*>(data);
+  T* GetData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
   }
 
-  T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T Get(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (UNLIKELY(!CheckIsValidIndex(i))) {
       DCHECK(Thread::Current()->IsExceptionPending());
       return T(0);
@@ -137,7 +136,7 @@
     return GetWithoutChecks(i);
   }
 
-  T GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(CheckIsValidIndex(i));
     return GetData()[i];
   }
@@ -155,6 +154,22 @@
     GetData()[i] = value;
   }
 
+  /*
+   * Works like memmove(), except we guarantee not to allow tearing of array values (i.e. no
+   * copies smaller than the element size). Arguments are assumed to be within the bounds of the
+   * array and the arrays are assumed to be non-null.
+   */
+  void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  /*
+   * Works like memcpy(), except we guarantee not to allow tearing of array values (i.e. no
+   * copies smaller than the element size). Arguments are assumed to be within the bounds of the
+   * array and the arrays are assumed to be non-null.
+   */
+  void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static void SetArrayClass(Class* array_class) {
     CHECK(array_class_ == NULL);
     CHECK(array_class != NULL);
diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h
index d8c278c..530226b 100644
--- a/runtime/mirror/art_field-inl.h
+++ b/runtime/mirror/art_field-inl.h
@@ -29,8 +29,8 @@
 namespace art {
 namespace mirror {
 
-inline Class* ArtField::GetDeclaringClass() const {
-  Class* result = GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), false);
+inline Class* ArtField::GetDeclaringClass() {
+  Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), false);
   DCHECK(result != NULL);
   DCHECK(result->IsLoaded() || result->IsErroneous());
   return result;
@@ -40,106 +40,106 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), new_declaring_class, false);
 }
 
-inline uint32_t ArtField::GetAccessFlags() const {
+inline uint32_t ArtField::GetAccessFlags() {
   DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
   return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), false);
 }
 
-inline MemberOffset ArtField::GetOffset() const {
+inline MemberOffset ArtField::GetOffset() {
   DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
   return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), false));
 }
 
-inline MemberOffset ArtField::GetOffsetDuringLinking() const {
+inline MemberOffset ArtField::GetOffsetDuringLinking() {
   DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
   return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), false));
 }
 
-inline uint32_t ArtField::Get32(const Object* object) const {
+inline uint32_t ArtField::Get32(Object* object) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   return object->GetField32(GetOffset(), IsVolatile());
 }
 
-inline void ArtField::Set32(Object* object, uint32_t new_value) const {
+inline void ArtField::Set32(Object* object, uint32_t new_value) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   object->SetField32(GetOffset(), new_value, IsVolatile());
 }
 
-inline uint64_t ArtField::Get64(const Object* object) const {
+inline uint64_t ArtField::Get64(Object* object) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   return object->GetField64(GetOffset(), IsVolatile());
 }
 
-inline void ArtField::Set64(Object* object, uint64_t new_value) const {
+inline void ArtField::Set64(Object* object, uint64_t new_value) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   object->SetField64(GetOffset(), new_value, IsVolatile());
 }
 
-inline Object* ArtField::GetObj(const Object* object) const {
+inline Object* ArtField::GetObj(Object* object) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
-  return object->GetFieldObject<Object*>(GetOffset(), IsVolatile());
+  return object->GetFieldObject<Object>(GetOffset(), IsVolatile());
 }
 
-inline void ArtField::SetObj(Object* object, const Object* new_value) const {
+inline void ArtField::SetObj(Object* object, Object* new_value) {
   DCHECK(object != NULL) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   object->SetFieldObject(GetOffset(), new_value, IsVolatile());
 }
 
-inline bool ArtField::GetBoolean(const Object* object) const {
+inline bool ArtField::GetBoolean(Object* object) {
   DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType())
       << PrettyField(this);
   return Get32(object);
 }
 
-inline void ArtField::SetBoolean(Object* object, bool z) const {
+inline void ArtField::SetBoolean(Object* object, bool z) {
   DCHECK_EQ(Primitive::kPrimBoolean, FieldHelper(this).GetTypeAsPrimitiveType())
       << PrettyField(this);
   Set32(object, z);
 }
 
-inline int8_t ArtField::GetByte(const Object* object) const {
+inline int8_t ArtField::GetByte(Object* object) {
   DCHECK_EQ(Primitive::kPrimByte, FieldHelper(this).GetTypeAsPrimitiveType())
       << PrettyField(this);
   return Get32(object);
 }
 
-inline void ArtField::SetByte(Object* object, int8_t b) const {
+inline void ArtField::SetByte(Object* object, int8_t b) {
   DCHECK_EQ(Primitive::kPrimByte, FieldHelper(this).GetTypeAsPrimitiveType())
       << PrettyField(this);
   Set32(object, b);
 }
 
-inline uint16_t ArtField::GetChar(const Object* object) const {
+inline uint16_t ArtField::GetChar(Object* object) {
   DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType())
       << PrettyField(this);
   return Get32(object);
 }
 
-inline void ArtField::SetChar(Object* object, uint16_t c) const {
+inline void ArtField::SetChar(Object* object, uint16_t c) {
   DCHECK_EQ(Primitive::kPrimChar, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   Set32(object, c);
 }
 
-inline int16_t ArtField::GetShort(const Object* object) const {
+inline int16_t ArtField::GetShort(Object* object) {
   DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   return Get32(object);
 }
 
-inline void ArtField::SetShort(Object* object, int16_t s) const {
+inline void ArtField::SetShort(Object* object, int16_t s) {
   DCHECK_EQ(Primitive::kPrimShort, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   Set32(object, s);
 }
 
-inline int32_t ArtField::GetInt(const Object* object) const {
+inline int32_t ArtField::GetInt(Object* object) {
 #ifndef NDEBUG
   Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType();
   CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -147,7 +147,7 @@
   return Get32(object);
 }
 
-inline void ArtField::SetInt(Object* object, int32_t i) const {
+inline void ArtField::SetInt(Object* object, int32_t i) {
 #ifndef NDEBUG
   Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType();
   CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -155,7 +155,7 @@
   Set32(object, i);
 }
 
-inline int64_t ArtField::GetLong(const Object* object) const {
+inline int64_t ArtField::GetLong(Object* object) {
 #ifndef NDEBUG
   Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType();
   CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -163,7 +163,7 @@
   return Get64(object);
 }
 
-inline void ArtField::SetLong(Object* object, int64_t j) const {
+inline void ArtField::SetLong(Object* object, int64_t j) {
 #ifndef NDEBUG
   Primitive::Type type = FieldHelper(this).GetTypeAsPrimitiveType();
   CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -171,7 +171,7 @@
   Set64(object, j);
 }
 
-inline float ArtField::GetFloat(const Object* object) const {
+inline float ArtField::GetFloat(Object* object) {
   DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   JValue bits;
@@ -179,7 +179,7 @@
   return bits.GetF();
 }
 
-inline void ArtField::SetFloat(Object* object, float f) const {
+inline void ArtField::SetFloat(Object* object, float f) {
   DCHECK_EQ(Primitive::kPrimFloat, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   JValue bits;
@@ -187,7 +187,7 @@
   Set32(object, bits.GetI());
 }
 
-inline double ArtField::GetDouble(const Object* object) const {
+inline double ArtField::GetDouble(Object* object) {
   DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   JValue bits;
@@ -195,7 +195,7 @@
   return bits.GetD();
 }
 
-inline void ArtField::SetDouble(Object* object, double d) const {
+inline void ArtField::SetDouble(Object* object, double d) {
   DCHECK_EQ(Primitive::kPrimDouble, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   JValue bits;
@@ -203,13 +203,13 @@
   Set64(object, bits.GetJ());
 }
 
-inline Object* ArtField::GetObject(const Object* object) const {
+inline Object* ArtField::GetObject(Object* object) {
   DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   return GetObj(object);
 }
 
-inline void ArtField::SetObject(Object* object, const Object* l) const {
+inline void ArtField::SetObject(Object* object, Object* l) {
   DCHECK_EQ(Primitive::kPrimNot, FieldHelper(this).GetTypeAsPrimitiveType())
        << PrettyField(this);
   SetObj(object, l);
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index 62bcf06..b33fe4b 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -30,98 +30,74 @@
 // C++ mirror of java.lang.reflect.ArtField
 class MANAGED ArtField : public Object {
  public:
-  Class* GetDeclaringClass() const;
+  Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t GetAccessFlags() const;
+  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetAccessFlags(uint32_t new_access_flags) {
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), new_access_flags, false);
   }
 
-  bool IsPublic() const {
+  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
-  bool IsStatic() const {
+  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
-  bool IsFinal() const {
+  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  uint32_t GetDexFieldIndex() const {
+  uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), false);
   }
 
-  void SetDexFieldIndex(uint32_t new_idx) {
+  void SetDexFieldIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), new_idx, false);
   }
 
-  // Offset to field within an Object
-  MemberOffset GetOffset() const;
+  // Offset to field within an Object.
+  MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static MemberOffset OffsetOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
   }
 
-  MemberOffset GetOffsetDuringLinking() const;
+  MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetOffset(MemberOffset num_bytes);
+  void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // field access, null object for static fields
-  bool GetBoolean(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetBoolean(Object* object, bool z) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  int8_t GetByte(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetByte(Object* object, int8_t b) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  uint16_t GetChar(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetChar(Object* object, uint16_t c) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  int16_t GetShort(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetShort(Object* object, int16_t s) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  int32_t GetInt(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetInt(Object* object, int32_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  int64_t GetLong(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetLong(Object* object, int64_t j) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  float GetFloat(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetFloat(Object* object, float f) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  double GetDouble(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetDouble(Object* object, double d) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  Object* GetObject(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetObject(Object* object, const Object* l) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBoolean(Object* object, bool z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int8_t GetByte(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetByte(Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetChar(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetChar(Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int16_t GetShort(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetShort(Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetInt(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetInt(Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int64_t GetLong(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetLong(Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  float GetFloat(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetFloat(Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  double GetDouble(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetDouble(Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* GetObject(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetObject(Object* object, Object* l) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // raw field accesses
-  uint32_t Get32(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Set32(Object* object, uint32_t new_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  uint64_t Get64(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Set64(Object* object, uint64_t new_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  Object* GetObj(const Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetObj(Object* object, const Object* new_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Raw field accesses.
+  uint32_t Get32(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Set32(Object* object, uint32_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint64_t Get64(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Set64(Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* GetObj(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetObj(Object* object, Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Class* GetJavaLangReflectArtField() {
     DCHECK(java_lang_reflect_ArtField_ != NULL);
@@ -133,14 +109,14 @@
   static void VisitRoots(RootVisitor* visitor, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsVolatile() const {
+  bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
  private:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of
-  Class* declaring_class_;
+  HeapReference<Class> declaring_class_;
 
   uint32_t access_flags_;
 
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 088f616..8ef3be8 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -27,8 +27,9 @@
 namespace art {
 namespace mirror {
 
-inline Class* ArtMethod::GetDeclaringClass() const {
-  Class* result = GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_), false);
+inline Class* ArtMethod::GetDeclaringClass() {
+  Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_),
+                                        false);
   DCHECK(result != NULL) << this;
   DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
   return result;
@@ -38,44 +39,44 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_), new_declaring_class, false);
 }
 
-inline uint32_t ArtMethod::GetAccessFlags() const {
+inline uint32_t ArtMethod::GetAccessFlags() {
   DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous());
   return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), false);
 }
 
-inline uint16_t ArtMethod::GetMethodIndex() const {
+inline uint16_t ArtMethod::GetMethodIndex() {
   DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
   return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), false);
 }
 
-inline uint32_t ArtMethod::GetDexMethodIndex() const {
+inline uint32_t ArtMethod::GetDexMethodIndex() {
 #ifdef ART_SEA_IR_MODE
   // TODO: Re-add this check for (PORTABLE + SMALL + ) SEA IR when PORTABLE IS fixed!
   // DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
 #else
   DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
 #endif
-  return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_dex_index_), false);
+  return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), false);
 }
 
-inline ObjectArray<String>* ArtMethod::GetDexCacheStrings() const {
-  return GetFieldObject<ObjectArray<String>*>(
+inline ObjectArray<String>* ArtMethod::GetDexCacheStrings() {
+  return GetFieldObject<ObjectArray<String> >(
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_), false);
 }
 
-inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() const {
-  return GetFieldObject<ObjectArray<ArtMethod>*>(
+inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() {
+  return GetFieldObject<ObjectArray<ArtMethod> >(
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_), false);
 }
 
-inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() const {
-  return GetFieldObject<ObjectArray<Class>*>(
+inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
+  return GetFieldObject<ObjectArray<Class> >(
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_), false);
 }
 
-inline uint32_t ArtMethod::GetCodeSize() const {
+inline uint32_t ArtMethod::GetCodeSize() {
   DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
-  uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromCompiledCode());
+  uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
   if (code == 0) {
     return 0;
   }
@@ -106,7 +107,7 @@
   }
 }
 
-inline void ArtMethod::AssertPcIsWithinCode(uintptr_t pc) const {
+inline void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
   if (!kIsDebugBuild) {
     return;
   }
@@ -116,34 +117,44 @@
   if (pc == GetQuickInstrumentationExitPc()) {
     return;
   }
-  const void* code = GetEntryPointFromCompiledCode();
-  if (code == GetCompiledCodeToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
+  const void* code = GetEntryPointFromQuickCompiledCode();
+  if (code == GetQuickToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
     return;
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  if (code == GetResolutionTrampoline(class_linker)) {
+  if (code == GetQuickResolutionTrampoline(class_linker)) {
     return;
   }
-  DCHECK(IsWithinCode(pc))
+  DCHECK(IsWithinQuickCode(pc))
       << PrettyMethod(this)
       << " pc=" << std::hex << pc
       << " code=" << code
       << " size=" << GetCodeSize();
 }
 
-inline uint32_t ArtMethod::GetOatCodeOffset() const {
+inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetEntryPointFromCompiledCode());
+  return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
 }
 
-inline void ArtMethod::SetOatCodeOffset(uint32_t code_offset) {
+inline uint32_t ArtMethod::GetPortableOatCodeOffset() {
   DCHECK(!Runtime::Current()->IsStarted());
-  SetEntryPointFromCompiledCode(reinterpret_cast<void*>(code_offset));
+  return PointerToLowMemUInt32(GetEntryPointFromPortableCompiledCode());
 }
 
-inline uint32_t ArtMethod::GetOatMappingTableOffset() const {
+inline void ArtMethod::SetQuickOatCodeOffset(uint32_t code_offset) {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetMappingTable());
+  SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(code_offset));
+}
+
+inline void ArtMethod::SetPortableOatCodeOffset(uint32_t code_offset) {
+  DCHECK(!Runtime::Current()->IsStarted());
+  SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
+}
+
+inline uint32_t ArtMethod::GetOatMappingTableOffset() {
+  DCHECK(!Runtime::Current()->IsStarted());
+  return PointerToLowMemUInt32(GetMappingTable());
 }
 
 inline void ArtMethod::SetOatMappingTableOffset(uint32_t mapping_table_offset) {
@@ -151,9 +162,9 @@
   SetMappingTable(reinterpret_cast<const uint8_t*>(mapping_table_offset));
 }
 
-inline uint32_t ArtMethod::GetOatVmapTableOffset() const {
+inline uint32_t ArtMethod::GetOatVmapTableOffset() {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetVmapTable());
+  return PointerToLowMemUInt32(GetVmapTable());
 }
 
 inline void ArtMethod::SetOatVmapTableOffset(uint32_t vmap_table_offset) {
@@ -166,16 +177,16 @@
   SetNativeGcMap(reinterpret_cast<uint8_t*>(gc_map_offset));
 }
 
-inline uint32_t ArtMethod::GetOatNativeGcMapOffset() const {
+inline uint32_t ArtMethod::GetOatNativeGcMapOffset() {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetNativeGcMap());
+  return PointerToLowMemUInt32(GetNativeGcMap());
 }
 
-inline bool ArtMethod::IsRuntimeMethod() const {
+inline bool ArtMethod::IsRuntimeMethod() {
   return GetDexMethodIndex() == DexFile::kDexNoIndex;
 }
 
-inline bool ArtMethod::IsCalleeSaveMethod() const {
+inline bool ArtMethod::IsCalleeSaveMethod() {
   if (!IsRuntimeMethod()) {
     return false;
   }
@@ -190,14 +201,14 @@
   return result;
 }
 
-inline bool ArtMethod::IsResolutionMethod() const {
+inline bool ArtMethod::IsResolutionMethod() {
   bool result = this == Runtime::Current()->GetResolutionMethod();
   // Check that if we do think it is phony it looks like the resolution method.
   DCHECK(!result || IsRuntimeMethod());
   return result;
 }
 
-inline bool ArtMethod::IsImtConflictMethod() const {
+inline bool ArtMethod::IsImtConflictMethod() {
   bool result = this == Runtime::Current()->GetImtConflictMethod();
   // Check that if we do think it is phony it looks like the imt conflict method.
   DCHECK(!result || IsRuntimeMethod());
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index f4a076c..575ea03 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -47,7 +47,7 @@
   }
 }
 
-InvokeType ArtMethod::GetInvokeType() const {
+InvokeType ArtMethod::GetInvokeType() {
   // TODO: kSuper?
   if (GetDeclaringClass()->IsInterface()) {
     return kInterface;
@@ -100,11 +100,11 @@
   return num_registers;
 }
 
-bool ArtMethod::IsProxyMethod() const {
+bool ArtMethod::IsProxyMethod() {
   return GetDeclaringClass()->IsProxyClass();
 }
 
-ArtMethod* ArtMethod::FindOverriddenMethod() const {
+ArtMethod* ArtMethod::FindOverriddenMethod() {
   if (IsStatic()) {
     return NULL;
   }
@@ -147,13 +147,16 @@
   return result;
 }
 
-uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) const {
+uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
   const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
   return pc - reinterpret_cast<uintptr_t>(code);
 }
 
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc) const {
-#if !defined(ART_USE_PORTABLE_COMPILER)
+uint32_t ArtMethod::ToDexPc(const uintptr_t pc) {
+  if (IsPortableCompiled()) {
+    // Portable doesn't use the machine pc, we just use dex pc instead.
+    return static_cast<uint32_t>(pc);
+  }
   MappingTable table(GetMappingTable());
   if (table.TotalSize() == 0) {
     DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
@@ -176,16 +179,12 @@
     }
   }
   LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
-             << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
-             << ") in " << PrettyMethod(this);
+                     << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
+                     << ") in " << PrettyMethod(this);
   return DexFile::kDexNoIndex;
-#else
-  // Compiler LLVM doesn't use the machine pc, we just use dex pc instead.
-  return static_cast<uint32_t>(pc);
-#endif
 }
 
-uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) const {
+uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) {
   MappingTable table(GetMappingTable());
   if (table.TotalSize() == 0) {
     DCHECK_EQ(dex_pc, 0U);
@@ -213,7 +212,7 @@
 }
 
 uint32_t ArtMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc,
-                                   bool* has_no_move_exception) const {
+                                   bool* has_no_move_exception) {
   MethodHelper mh(this);
   const DexFile::CodeItem* code_item = mh.GetCodeItem();
   // Default to handler not found.
@@ -265,16 +264,21 @@
     }
   } else {
     const bool kLogInvocationStartAndReturn = false;
-    if (GetEntryPointFromCompiledCode() != NULL) {
+    bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
+    bool have_portable_code = GetEntryPointFromPortableCompiledCode() != nullptr;
+    if (LIKELY(have_quick_code || have_portable_code)) {
       if (kLogInvocationStartAndReturn) {
-        LOG(INFO) << StringPrintf("Invoking '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
+        LOG(INFO) << StringPrintf("Invoking '%s' %s code=%p", PrettyMethod(this).c_str(),
+                                  have_quick_code ? "quick" : "portable",
+                                  have_quick_code ? GetEntryPointFromQuickCompiledCode()
+                                                  : GetEntryPointFromPortableCompiledCode());
       }
-#ifdef ART_USE_PORTABLE_COMPILER
-      (*art_portable_invoke_stub)(this, args, args_size, self, result, result_type);
-#else
-      (*art_quick_invoke_stub)(this, args, args_size, self, result, result_type);
-#endif
-      if (UNLIKELY(reinterpret_cast<int32_t>(self->GetException(NULL)) == -1)) {
+      if (!IsPortableCompiled()) {
+        (*art_quick_invoke_stub)(this, args, args_size, self, result, result_type);
+      } else {
+        (*art_portable_invoke_stub)(this, args, args_size, self, result, result_type);
+      }
+      if (UNLIKELY(reinterpret_cast<intptr_t>(self->GetException(NULL)) == -1)) {
         // Unusual case where we were running LLVM generated code and an
         // exception was thrown to force the activations to be removed from the
         // stack. Continue execution in the interpreter.
@@ -285,11 +289,13 @@
         interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
       }
       if (kLogInvocationStartAndReturn) {
-        LOG(INFO) << StringPrintf("Returned '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
+        LOG(INFO) << StringPrintf("Returned '%s' %s code=%p", PrettyMethod(this).c_str(),
+                                  have_quick_code ? "quick" : "portable",
+                                  have_quick_code ? GetEntryPointFromQuickCompiledCode()
+                                                  : GetEntryPointFromPortableCompiledCode());
       }
     } else {
-      LOG(INFO) << "Not invoking '" << PrettyMethod(this)
-          << "' code=" << reinterpret_cast<const void*>(GetEntryPointFromCompiledCode());
+      LOG(INFO) << "Not invoking '" << PrettyMethod(this) << "' code=null";
       if (result != NULL) {
         result->SetJ(0);
       }
@@ -300,9 +306,10 @@
   self->PopManagedStackFragment(fragment);
 }
 
-bool ArtMethod::IsRegistered() const {
-  void* native_method = GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, native_method_), false);
-  CHECK(native_method != NULL);
+bool ArtMethod::IsRegistered() {
+  void* native_method =
+      GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), false);
+  CHECK(native_method != nullptr);
   void* jni_stub = GetJniDlsymLookupStub();
   return native_method != jni_stub;
 }
@@ -323,7 +330,7 @@
     // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct
     // the native method to runtime support and store the target somewhere runtime support will
     // find it.
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
     UNIMPLEMENTED(FATAL);
 #else
     SetNativeMethod(reinterpret_cast<void*>(art_work_around_app_jni_bugs));
@@ -340,7 +347,7 @@
 }
 
 void ArtMethod::SetNativeMethod(const void* native_method) {
-  SetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, native_method_),
+  SetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_),
       native_method, false);
 }
 
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 95ca4c9..bfa7cbe 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -45,7 +45,7 @@
 // C++ mirror of java.lang.reflect.Method and java.lang.reflect.Constructor
 class MANAGED ArtMethod : public Object {
  public:
-  Class* GetDeclaringClass() const;
+  Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -53,41 +53,37 @@
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
-  static MemberOffset EntryPointFromCompiledCodeOffset() {
-    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_compiled_code_));
-  }
+  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t GetAccessFlags() const;
-
-  void SetAccessFlags(uint32_t new_access_flags) {
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), new_access_flags, false);
   }
 
   // Approximate what kind of method call would be used for this method.
-  InvokeType GetInvokeType() const;
+  InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the method is declared public.
-  bool IsPublic() const {
+  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the method is declared private.
-  bool IsPrivate() const {
+  bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPrivate) != 0;
   }
 
   // Returns true if the method is declared static.
-  bool IsStatic() const {
+  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
   // Returns true if the method is a constructor.
-  bool IsConstructor() const {
+  bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccConstructor) != 0;
   }
 
   // Returns true if the method is static, private, or a constructor.
-  bool IsDirect() const {
+  bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return IsDirect(GetAccessFlags());
   }
 
@@ -96,55 +92,70 @@
   }
 
   // Returns true if the method is declared synchronized.
-  bool IsSynchronized() const {
+  bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
     return (GetAccessFlags() & synchonized) != 0;
   }
 
-  bool IsFinal() const {
+  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsMiranda() const {
+  bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccMiranda) != 0;
   }
 
-  bool IsNative() const {
+  bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccNative) != 0;
   }
 
-  bool IsFastNative() const {
+  bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t mask = kAccFastNative | kAccNative;
     return (GetAccessFlags() & mask) == mask;
   }
 
-  bool IsAbstract() const {
+  bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
-  bool IsSynthetic() const {
+  bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  bool IsProxyMethod() const;
+  bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsPreverified() const {
+  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPreverified) != 0;
   }
 
-  void SetPreverified() {
+  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(!IsPreverified());
     SetAccessFlags(GetAccessFlags() | kAccPreverified);
   }
 
+  bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccPortableCompiled) != 0;
+  }
+
+  void SetIsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(!IsPortableCompiled());
+    SetAccessFlags(GetAccessFlags() | kAccPortableCompiled);
+  }
+
+  void ClearIsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(IsPortableCompiled());
+    SetAccessFlags(GetAccessFlags() & ~kAccPortableCompiled);
+  }
+
   bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint16_t GetMethodIndex() const;
+  uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t GetVtableIndex() const {
+  size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetMethodIndex();
   }
 
-  void SetMethodIndex(uint16_t new_method_index) {
+  void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index, false);
   }
 
@@ -152,24 +163,24 @@
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
   }
 
-  uint32_t GetCodeItemOffset() const {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, code_item_offset_), false);
+  uint32_t GetCodeItemOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), false);
   }
 
   void SetCodeItemOffset(uint32_t new_code_off) {
-    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, code_item_offset_), new_code_off, false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), new_code_off, false);
   }
 
   // Number of 32bit registers that would be required to hold all the arguments
   static size_t NumArgRegisters(const StringPiece& shorty);
 
-  uint32_t GetDexMethodIndex() const;
+  uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDexMethodIndex(uint32_t new_idx) {
-    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_dex_index_), new_idx, false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx, false);
   }
 
-  ObjectArray<String>* GetDexCacheStrings() const;
+  ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -185,41 +196,62 @@
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
   }
 
-  ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() const;
+  ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<Class>* GetDexCacheResolvedTypes() const;
+  ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Find the method that this method overrides
-  ArtMethod* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* FindOverriddenMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, char result_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  EntryPointFromInterpreter* GetEntryPointFromInterpreter() const {
-    return GetFieldPtr<EntryPointFromInterpreter*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), false);
+  EntryPointFromInterpreter* GetEntryPointFromInterpreter() {
+    return GetFieldPtr<EntryPointFromInterpreter*>(
+               OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), false);
   }
 
   void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) {
-    SetFieldPtr<EntryPointFromInterpreter*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), entry_point_from_interpreter, false);
+    SetFieldPtr<EntryPointFromInterpreter*>(
+        OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
+        entry_point_from_interpreter, false);
   }
 
-  const void* GetEntryPointFromCompiledCode() const {
-    return GetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_compiled_code_), false);
+  static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
   }
 
-  void SetEntryPointFromCompiledCode(const void* entry_point_from_compiled_code) {
-    SetFieldPtr<const void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_compiled_code_), entry_point_from_compiled_code, false);
+  const void* GetEntryPointFromPortableCompiledCode() {
+    return GetFieldPtr<const void*>(EntryPointFromPortableCompiledCodeOffset(), false);
   }
 
-  uint32_t GetCodeSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code) {
+    SetFieldPtr<const void*>(EntryPointFromPortableCompiledCodeOffset(),
+        entry_point_from_portable_compiled_code, false);
+  }
 
-  bool IsWithinCode(uintptr_t pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromCompiledCode());
+  static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
+  }
+
+  const void* GetEntryPointFromQuickCompiledCode() {
+    return GetFieldPtr<const void*>(EntryPointFromQuickCompiledCodeOffset(), false);
+  }
+
+  void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
+    SetFieldPtr<const void*>(EntryPointFromQuickCompiledCodeOffset(),
+        entry_point_from_quick_compiled_code, false);
+  }
+
+
+  uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool IsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
     if (code == 0) {
       return pc == 0;
     }
@@ -231,45 +263,44 @@
     return (code <= pc && pc <= code + GetCodeSize());
   }
 
-  void AssertPcIsWithinCode(uintptr_t pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t GetOatCodeOffset() const;
-
-  void SetOatCodeOffset(uint32_t code_offset);
-
-  static MemberOffset GetEntryPointFromCompiledCodeOffset() {
-    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_compiled_code_);
-  }
+  uint32_t GetQuickOatCodeOffset();
+  uint32_t GetPortableOatCodeOffset();
+  void SetQuickOatCodeOffset(uint32_t code_offset);
+  void SetPortableOatCodeOffset(uint32_t code_offset);
 
   // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
-  const uint8_t* GetMappingTable() const {
-    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, mapping_table_), false);
+  const uint8_t* GetMappingTable() {
+    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_),
+        false);
   }
 
   void SetMappingTable(const uint8_t* mapping_table) {
-    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, mapping_table_),
-                                 mapping_table, false);
+    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_),
+                                mapping_table, false);
   }
 
-  uint32_t GetOatMappingTableOffset() const;
+  uint32_t GetOatMappingTableOffset();
 
   void SetOatMappingTableOffset(uint32_t mapping_table_offset);
 
   // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
-  const uint8_t* GetVmapTable() const {
-    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, vmap_table_), false);
+  const uint8_t* GetVmapTable() {
+    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_),
+        false);
   }
 
   void SetVmapTable(const uint8_t* vmap_table) {
-    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, vmap_table_), vmap_table, false);
+    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_), vmap_table,
+        false);
   }
 
-  uint32_t GetOatVmapTableOffset() const;
+  uint32_t GetOatVmapTableOffset();
 
   void SetOatVmapTableOffset(uint32_t vmap_table_offset);
 
-  const uint8_t* GetNativeGcMap() const {
+  const uint8_t* GetNativeGcMap() {
     return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), false);
   }
   void SetNativeGcMap(const uint8_t* data) {
@@ -278,31 +309,30 @@
 
   // When building the oat need a convenient place to stuff the offset of the native GC map.
   void SetOatNativeGcMapOffset(uint32_t gc_map_offset);
-  uint32_t GetOatNativeGcMapOffset() const;
+  uint32_t GetOatNativeGcMapOffset();
 
-  size_t GetFrameSizeInBytes() const {
+  size_t GetFrameSizeInBytes() {
     DCHECK_EQ(sizeof(size_t), sizeof(uint32_t));
-    size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, frame_size_in_bytes_), false);
+    size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_), false);
     DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
     return result;
   }
 
   void SetFrameSizeInBytes(size_t new_frame_size_in_bytes) {
-    DCHECK_EQ(sizeof(size_t), sizeof(uint32_t));
-    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, frame_size_in_bytes_),
+    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
                new_frame_size_in_bytes, false);
   }
 
-  size_t GetReturnPcOffsetInBytes() const {
+  size_t GetReturnPcOffsetInBytes() {
     return GetFrameSizeInBytes() - kPointerSize;
   }
 
-  size_t GetSirtOffsetInBytes() const {
+  size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     CHECK(IsNative());
     return kPointerSize;
   }
 
-  bool IsRegistered() const;
+  bool IsRegistered();
 
   void RegisterNative(Thread* self, const void* native_method, bool is_fast)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -310,10 +340,10 @@
   void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static MemberOffset NativeMethodOffset() {
-    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, native_method_);
+    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
   }
 
-  const void* GetNativeMethod() const {
+  const void* GetNativeMethod() {
     return reinterpret_cast<const void*>(GetField32(NativeMethodOffset(), false));
   }
 
@@ -323,47 +353,47 @@
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
   }
 
-  uint32_t GetCoreSpillMask() const {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, core_spill_mask_), false);
+  uint32_t GetCoreSpillMask() {
+    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), false);
   }
 
   void SetCoreSpillMask(uint32_t core_spill_mask) {
     // Computed during compilation
-    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, core_spill_mask_), core_spill_mask, false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask, false);
   }
 
-  uint32_t GetFpSpillMask() const {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, fp_spill_mask_), false);
+  uint32_t GetFpSpillMask() {
+    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), false);
   }
 
   void SetFpSpillMask(uint32_t fp_spill_mask) {
     // Computed during compilation
-    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, fp_spill_mask_), fp_spill_mask, false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask, false);
   }
 
   // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
   // conventions for a method of managed code. Returns false for Proxy methods.
-  bool IsRuntimeMethod() const;
+  bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Is this a hand crafted method used for something like describing callee saves?
-  bool IsCalleeSaveMethod() const;
+  bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsResolutionMethod() const;
+  bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsImtConflictMethod() const;
+  bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uintptr_t NativePcOffset(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Converts a native PC to a dex PC.
-  uint32_t ToDexPc(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t ToDexPc(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Converts a dex PC to a native PC.
-  uintptr_t ToNativePc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t ToNativePc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Find the catch block for the given exception type and dex_pc. When a catch block is found,
   // indicates whether the found catch block is responsible for clearing the exception or whether
   // a move-exception instruction is present.
-  uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc, bool* has_no_move_exception) const
+  uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc, bool* has_no_move_exception)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static void SetClass(Class* java_lang_reflect_ArtMethod);
@@ -379,65 +409,83 @@
 
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
-  // The class we are a part of
-  Class* declaring_class_;
+  // The class we are a part of.
+  HeapReference<Class> declaring_class_;
 
-  // short cuts to declaring_class_->dex_cache_ member for fast compiled code access
-  ObjectArray<ArtMethod>* dex_cache_resolved_methods_;
+  // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
+  HeapReference<ObjectArray<ArtMethod> > dex_cache_resolved_methods_;
 
-  // short cuts to declaring_class_->dex_cache_ member for fast compiled code access
-  ObjectArray<Class>* dex_cache_resolved_types_;
+  // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
+  HeapReference<ObjectArray<Class> > dex_cache_resolved_types_;
 
-  // short cuts to declaring_class_->dex_cache_ member for fast compiled code access
-  ObjectArray<String>* dex_cache_strings_;
+  // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
+  HeapReference<ObjectArray<String> > dex_cache_strings_;
 
-  // Access flags; low 16 bits are defined by spec.
-  uint32_t access_flags_;
+  // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
+  // compiled code.
+  uint64_t entry_point_from_interpreter_;
 
-  // Offset to the CodeItem.
-  uint32_t code_item_offset_;
+  // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+  uint64_t entry_point_from_jni_;
 
-  // Architecture-dependent register spill mask
-  uint32_t core_spill_mask_;
+  // Method dispatch from portable compiled code invokes this pointer which may cause bridging into
+  // quick compiled code or the interpreter.
+  uint64_t entry_point_from_portable_compiled_code_;
 
-  // Compiled code associated with this method for callers from managed code.
-  // May be compiled managed code or a bridge for invoking a native method.
-  // TODO: Break apart this into portable and quick.
-  const void* entry_point_from_compiled_code_;
+  // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
+  // portable compiled code or the interpreter.
+  uint64_t entry_point_from_quick_compiled_code_;
 
-  // Called by the interpreter to execute this method.
-  EntryPointFromInterpreter* entry_point_from_interpreter_;
+  // Pointer to a data structure created by the compiler and used by the garbage collector to
+  // determine which registers hold live references to objects within the heap. Keyed by native PC
+  // offsets for the quick compiler and dex PCs for the portable compiler.
+  uint64_t gc_map_;
 
-  // Architecture-dependent register spill mask
-  uint32_t fp_spill_mask_;
+  // --- Quick compiler meta-data. ---
+  // TODO: merge and place in native heap, as is done with the code size.
 
-  // Total size in bytes of the frame
-  size_t frame_size_in_bytes_;
-
-  // Garbage collection map of native PC offsets (quick) or dex PCs (portable) to reference bitmaps.
-  const uint8_t* gc_map_;
-
-  // Mapping from native pc to dex pc
-  const uint32_t* mapping_table_;
-
-  // Index into method_ids of the dex file associated with this method
-  uint32_t method_dex_index_;
-
-  // For concrete virtual methods, this is the offset of the method in Class::vtable_.
-  //
-  // For abstract methods in an interface class, this is the offset of the method in
-  // "iftable_->Get(n)->GetMethodArray()".
-  //
-  // For static and direct methods this is the index in the direct methods table.
-  uint32_t method_index_;
-
-  // The target native method registered with this method
-  const void* native_method_;
+  // Pointer to a data structure created by the quick compiler to map between dex PCs and native
+  // PCs, and vice-versa.
+  uint64_t quick_mapping_table_;
 
   // When a register is promoted into a register, the spill mask holds which registers hold dex
   // registers. The first promoted register's corresponding dex register is vmap_table_[1], the Nth
   // is vmap_table_[N]. vmap_table_[0] holds the length of the table.
-  const uint16_t* vmap_table_;
+  uint64_t quick_vmap_table_;
+
+  // --- End of quick compiler meta-data. ---
+
+  // Access flags; low 16 bits are defined by spec.
+  uint32_t access_flags_;
+
+  /* Dex file fields. The defining dex file is available via declaring_class_->dex_cache_ */
+
+  // Offset to the CodeItem.
+  uint32_t dex_code_item_offset_;
+
+  // Index into method_ids of the dex file associated with this method.
+  uint32_t dex_method_index_;
+
+  /* End of dex file fields. */
+
+  // Entry within a dispatch table for this method. For static/direct methods the index is into
+  // the declaringClass.directMethods, for virtual methods the vtable and for interface methods the
+  // ifTable.
+  uint32_t method_index_;
+
+  // --- Quick compiler meta-data. ---
+  // TODO: merge and place in native heap, as is done with the code size.
+
+  // Bit map of spilled machine registers.
+  uint32_t quick_core_spill_mask_;
+
+  // Bit map of spilled floating point machine registers.
+  uint32_t quick_fp_spill_mask_;
+
+  // Fixed frame size for this method when executed.
+  uint32_t quick_frame_size_in_bytes_;
+
+  // --- End of quick compiler meta-data. ---
 
   static Class* java_lang_reflect_ArtMethod_;
 
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8ddaeb2..a5f743b 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -33,63 +33,61 @@
 namespace art {
 namespace mirror {
 
-inline size_t Class::GetObjectSize() const {
+inline uint32_t Class::GetObjectSize() {
   DCHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this);
-  DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
   return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false);
 }
 
-inline Class* Class::GetSuperClass() const {
+inline Class* Class::GetSuperClass() {
   // Can only get super class for loaded classes (hack for when runtime is
   // initializing)
   DCHECK(IsLoaded() || !Runtime::Current()->IsStarted()) << IsLoaded();
-  return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false);
+  return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false);
 }
 
-inline ClassLoader* Class::GetClassLoader() const {
-  return GetFieldObject<ClassLoader*>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false);
+inline ClassLoader* Class::GetClassLoader() {
+  return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false);
 }
 
-inline DexCache* Class::GetDexCache() const {
-  return GetFieldObject<DexCache*>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false);
+inline DexCache* Class::GetDexCache() {
+  return GetFieldObject<DexCache>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false);
 }
 
-inline ObjectArray<ArtMethod>* Class::GetDirectMethods() const {
+inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtMethod>*>(
+  return GetFieldObject<ObjectArray<ArtMethod> >(
       OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false);
 }
 
 inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod>*>(
+  DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod> >(
       OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false));
   DCHECK_NE(0, new_direct_methods->GetLength());
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_),
                  new_direct_methods, false);
 }
 
-inline ArtMethod* Class::GetDirectMethod(int32_t i) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtMethod* Class::GetDirectMethod(int32_t i) {
   return GetDirectMethods()->Get(i);
 }
 
 inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectArray<ArtMethod>* direct_methods =
-      GetFieldObject<ObjectArray<ArtMethod>*>(
+      GetFieldObject<ObjectArray<ArtMethod> >(
           OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false);
   direct_methods->Set(i, f);
 }
 
 // Returns the number of static, private, and constructor methods.
-inline size_t Class::NumDirectMethods() const {
+inline uint32_t Class::NumDirectMethods() {
   return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
 }
 
-inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() const {
+inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtMethod>*>(
+  return GetFieldObject<ObjectArray<ArtMethod> >(
       OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false);
 }
 
@@ -101,18 +99,16 @@
                  new_virtual_methods, false);
 }
 
-inline size_t Class::NumVirtualMethods() const {
+inline uint32_t Class::NumVirtualMethods() {
   return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
 }
 
-inline ArtMethod* Class::GetVirtualMethod(uint32_t i) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtMethod* Class::GetVirtualMethod(uint32_t i) {
   DCHECK(IsResolved() || IsErroneous());
   return GetVirtualMethods()->Get(i);
 }
 
-inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) {
   DCHECK(IsLoaded() || IsErroneous());
   return GetVirtualMethods()->Get(i);
 }
@@ -120,35 +116,34 @@
 inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectArray<ArtMethod>* virtual_methods =
-      GetFieldObject<ObjectArray<ArtMethod>*>(
+      GetFieldObject<ObjectArray<ArtMethod> >(
           OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false);
   virtual_methods->Set(i, f);
 }
 
-inline ObjectArray<ArtMethod>* Class::GetVTable() const {
+inline ObjectArray<ArtMethod>* Class::GetVTable() {
   DCHECK(IsResolved() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtMethod>*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
+  return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
 }
 
-inline ObjectArray<ArtMethod>* Class::GetVTableDuringLinking() const {
+inline ObjectArray<ArtMethod>* Class::GetVTableDuringLinking() {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtMethod>*>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
+  return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
 }
 
-inline void Class::SetVTable(ObjectArray<ArtMethod>* new_vtable)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline void Class::SetVTable(ObjectArray<ArtMethod>* new_vtable) {
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false);
 }
 
-inline ObjectArray<ArtMethod>* Class::GetImTable() const {
-  return GetFieldObject<ObjectArray<ArtMethod>*>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), false);
+inline ObjectArray<ArtMethod>* Class::GetImTable() {
+  return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), false);
 }
 
 inline void Class::SetImTable(ObjectArray<ArtMethod>* new_imtable) {
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable, false);
 }
 
-inline bool Class::Implements(const Class* klass) const {
+inline bool Class::Implements(Class* klass) {
   DCHECK(klass != NULL);
   DCHECK(klass->IsInterface()) << PrettyClass(this);
   // All interfaces implemented directly and by our superclass, and
@@ -183,13 +178,13 @@
 // Don't forget about primitive types.
 //   Object[]         = int[] --> false
 //
-inline bool Class::IsArrayAssignableFromArray(const Class* src) const {
+inline bool Class::IsArrayAssignableFromArray(Class* src) {
   DCHECK(IsArrayClass())  << PrettyClass(this);
   DCHECK(src->IsArrayClass()) << PrettyClass(src);
   return GetComponentType()->IsAssignableFrom(src->GetComponentType());
 }
 
-inline bool Class::IsAssignableFromArray(const Class* src) const {
+inline bool Class::IsAssignableFromArray(Class* src) {
   DCHECK(!IsInterface()) << PrettyClass(this);  // handled first in IsAssignableFrom
   DCHECK(src->IsArrayClass()) << PrettyClass(src);
   if (!IsArrayClass()) {
@@ -205,13 +200,13 @@
 
 template <bool throw_on_failure, bool use_referrers_cache>
 inline bool Class::ResolvedFieldAccessTest(Class* access_to, ArtField* field,
-                                           uint32_t field_idx, const DexCache* dex_cache) {
+                                           uint32_t field_idx, DexCache* dex_cache) {
   DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
   if (UNLIKELY(!this->CanAccess(access_to))) {
     // The referrer class can't access the field's declaring class but may still be able
     // to access the field if the FieldId specifies an accessible subclass of the declaring
     // class rather than the declaring class itself.
-    const DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
+    DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
     uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetFieldId(field_idx).class_idx_;
     // The referenced class has already been resolved with the field, get it from the dex cache.
     Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx);
@@ -236,14 +231,14 @@
 
 template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
 inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method,
-                                            uint32_t method_idx, const DexCache* dex_cache) {
+                                            uint32_t method_idx, DexCache* dex_cache) {
   COMPILE_ASSERT(throw_on_failure || throw_invoke_type == kStatic, non_default_throw_invoke_type);
   DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
   if (UNLIKELY(!this->CanAccess(access_to))) {
     // The referrer class can't access the method's declaring class but may still be able
     // to access the method if the MethodId specifies an accessible subclass of the declaring
     // class rather than the declaring class itself.
-    const DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
+    DexCache* referrer_dex_cache = use_referrers_cache ? this->GetDexCache() : dex_cache;
     uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
     // The referenced class has already been resolved with the method, get it from the dex cache.
     Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx);
@@ -268,8 +263,8 @@
 }
 
 inline bool Class::CanAccessResolvedField(Class* access_to, ArtField* field,
-                                          const DexCache& dex_cache, uint32_t field_idx) {
-  return ResolvedFieldAccessTest<false, false>(access_to, field, field_idx, &dex_cache);
+                                          DexCache* dex_cache, uint32_t field_idx) {
+  return ResolvedFieldAccessTest<false, false>(access_to, field, field_idx, dex_cache);
 }
 
 inline bool Class::CheckResolvedFieldAccess(Class* access_to, ArtField* field,
@@ -278,8 +273,8 @@
 }
 
 inline bool Class::CanAccessResolvedMethod(Class* access_to, ArtMethod* method,
-                                           const DexCache& dex_cache, uint32_t method_idx) {
-  return ResolvedMethodAccessTest<false, false, kStatic>(access_to, method, method_idx, &dex_cache);
+                                           DexCache* dex_cache, uint32_t method_idx) {
+  return ResolvedMethodAccessTest<false, false, kStatic>(access_to, method, method_idx, dex_cache);
 }
 
 template <InvokeType throw_invoke_type>
@@ -289,10 +284,10 @@
                                                                  nullptr);
 }
 
-inline bool Class::IsSubClass(const Class* klass) const {
+inline bool Class::IsSubClass(Class* klass) {
   DCHECK(!IsInterface()) << PrettyClass(this);
   DCHECK(!IsArrayClass()) << PrettyClass(this);
-  const Class* current = this;
+  Class* current = this;
   do {
     if (current == klass) {
       return true;
@@ -302,7 +297,7 @@
   return false;
 }
 
-inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) const {
+inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
   Class* declaring_class = method->GetDeclaringClass();
   DCHECK(declaring_class != NULL) << PrettyClass(this);
   DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
@@ -317,21 +312,19 @@
   return NULL;
 }
 
-inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
   DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
   // The argument method may from a super class.
   // Use the index to a potentially overridden one for this instance's class.
   return GetVTable()->Get(method->GetMethodIndex());
 }
 
-inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) {
   DCHECK(!method->GetDeclaringClass()->IsInterface());
   return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex());
 }
 
-inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) const {
+inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) {
   if (method->IsDirect()) {
     return method;
   }
@@ -341,11 +334,11 @@
   return FindVirtualMethodForVirtual(method);
 }
 
-inline IfTable* Class::GetIfTable() const {
-  return GetFieldObject<IfTable*>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), false);
+inline IfTable* Class::GetIfTable() {
+  return GetFieldObject<IfTable>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), false);
 }
 
-inline int32_t Class::GetIfTableCount() const {
+inline int32_t Class::GetIfTableCount() {
   IfTable* iftable = GetIfTable();
   if (iftable == NULL) {
     return 0;
@@ -357,59 +350,58 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable, false);
 }
 
-inline ObjectArray<ArtField>* Class::GetIFields() const {
+inline ObjectArray<ArtField>* Class::GetIFields() {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtField>*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false);
+  return GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false);
 }
 
 inline void Class::SetIFields(ObjectArray<ArtField>* new_ifields)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(NULL == GetFieldObject<ObjectArray<ArtField>*>(
+  DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(
       OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false));
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields, false);
 }
 
-inline ObjectArray<ArtField>* Class::GetSFields() const {
+inline ObjectArray<ArtField>* Class::GetSFields() {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetFieldObject<ObjectArray<ArtField>*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false);
+  return GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false);
 }
 
 inline void Class::SetSFields(ObjectArray<ArtField>* new_sfields)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(NULL == GetFieldObject<ObjectArray<ArtField>*>(
+  DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(
       OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false));
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields, false);
 }
 
-inline size_t Class::NumStaticFields() const {
+inline uint32_t Class::NumStaticFields() {
   return (GetSFields() != NULL) ? GetSFields()->GetLength() : 0;
 }
 
-inline ArtField* Class::GetStaticField(uint32_t i) const  // TODO: uint16_t
+inline ArtField* Class::GetStaticField(uint32_t i)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return GetSFields()->Get(i);
 }
 
 inline void Class::SetStaticField(uint32_t i, ArtField* f)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  ObjectArray<ArtField>* sfields= GetFieldObject<ObjectArray<ArtField>*>(
+  ObjectArray<ArtField>* sfields = GetFieldObject<ObjectArray<ArtField> >(
       OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false);
   sfields->Set(i, f);
 }
 
-inline size_t Class::NumInstanceFields() const {
+inline uint32_t Class::NumInstanceFields() {
   return (GetIFields() != NULL) ? GetIFields()->GetLength() : 0;
 }
 
-inline ArtField* Class::GetInstanceField(uint32_t i) const  // TODO: uint16_t
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline ArtField* Class::GetInstanceField(uint32_t i) {  // TODO: uint16_t
   DCHECK_NE(NumInstanceFields(), 0U);
   return GetIFields()->Get(i);
 }
 
 inline void Class::SetInstanceField(uint32_t i, ArtField* f)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  ObjectArray<ArtField>* ifields= GetFieldObject<ObjectArray<ArtField>*>(
+  ObjectArray<ArtField>* ifields = GetFieldObject<ObjectArray<ArtField> >(
       OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false);
   ifields->Set(i, f);
 }
@@ -419,7 +411,7 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false);
 }
 
-inline uint32_t Class::GetAccessFlags() const {
+inline uint32_t Class::GetAccessFlags() {
   // Check class is loaded or this is java.lang.String that has a
   // circularity issue during loading the names of its members
   DCHECK(IsLoaded() || IsErroneous() ||
@@ -429,8 +421,8 @@
   return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
 }
 
-inline String* Class::GetName() const {
-  return GetFieldObject<String*>(OFFSET_OF_OBJECT_MEMBER(Class, name_), false);
+inline String* Class::GetName() {
+  return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_), false);
 }
 inline void Class::SetName(String* name) {
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index bd965fa..8051c9b 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -125,7 +125,7 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false);
 }
 
-void Class::SetClassSize(size_t new_class_size) {
+void Class::SetClassSize(uint32_t new_class_size) {
   if (kIsDebugBuild && (new_class_size < GetClassSize())) {
     DumpClass(LOG(ERROR), kDumpClassFullDetail);
     CHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this);
@@ -177,7 +177,7 @@
   return name;
 }
 
-void Class::DumpClass(std::ostream& os, int flags) const {
+void Class::DumpClass(std::ostream& os, int flags) {
   if ((flags & kDumpClassFullDetail) == 0) {
     os << PrettyClass(this);
     if ((flags & kDumpClassClassLoader) != 0) {
@@ -281,9 +281,9 @@
   }
 }
 
-bool Class::IsInSamePackage(const Class* that) const {
-  const Class* klass1 = this;
-  const Class* klass2 = that;
+bool Class::IsInSamePackage(Class* that) {
+  Class* klass1 = this;
+  Class* klass2 = that;
   if (klass1 == klass2) {
     return true;
   }
@@ -307,7 +307,7 @@
                          ClassHelper(klass2).GetDescriptor());
 }
 
-bool Class::IsClassClass() const {
+bool Class::IsClassClass() {
   Class* java_lang_Class = GetClass()->GetClass();
   return this == java_lang_Class;
 }
@@ -316,17 +316,17 @@
   return this == String::GetJavaLangString();
 }
 
-bool Class::IsThrowableClass() const {
+bool Class::IsThrowableClass() {
   return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
 }
 
-bool Class::IsArtFieldClass() const {
+bool Class::IsArtFieldClass() {
   Class* java_lang_Class = GetClass();
   Class* java_lang_reflect_ArtField = java_lang_Class->GetInstanceField(0)->GetClass();
   return this == java_lang_reflect_ArtField;
 }
 
-bool Class::IsArtMethodClass() const {
+bool Class::IsArtMethodClass() {
   return this == ArtMethod::GetJavaLangReflectArtMethod();
 }
 
@@ -334,7 +334,7 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false);
 }
 
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature) const {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature) {
   // Check the current class before checking the interfaces.
   ArtMethod* method = FindDeclaredVirtualMethod(name, signature);
   if (method != NULL) {
@@ -352,7 +352,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
+ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
   // Check the current class before checking the interfaces.
   ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
   if (method != NULL) {
@@ -370,7 +370,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) {
   MethodHelper mh;
   for (size_t i = 0; i < NumDirectMethods(); ++i) {
     ArtMethod* method = GetDirectMethod(i);
@@ -382,7 +382,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) const {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) {
   MethodHelper mh;
   for (size_t i = 0; i < NumDirectMethods(); ++i) {
     ArtMethod* method = GetDirectMethod(i);
@@ -394,7 +394,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
+ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
   if (GetDexCache() == dex_cache) {
     for (size_t i = 0; i < NumDirectMethods(); ++i) {
       ArtMethod* method = GetDirectMethod(i);
@@ -406,8 +406,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature);
     if (method != NULL) {
       return method;
@@ -416,8 +416,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature);
     if (method != NULL) {
       return method;
@@ -426,8 +426,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx);
     if (method != NULL) {
       return method;
@@ -436,7 +436,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const {
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) {
   MethodHelper mh;
   for (size_t i = 0; i < NumVirtualMethods(); ++i) {
     ArtMethod* method = GetVirtualMethod(i);
@@ -449,7 +449,7 @@
 }
 
 ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
-                                            const Signature& signature) const {
+                                            const Signature& signature) {
   MethodHelper mh;
   for (size_t i = 0; i < NumVirtualMethods(); ++i) {
     ArtMethod* method = GetVirtualMethod(i);
@@ -461,7 +461,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
+ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
   if (GetDexCache() == dex_cache) {
     for (size_t i = 0; i < NumVirtualMethods(); ++i) {
       ArtMethod* method = GetVirtualMethod(i);
@@ -473,8 +473,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature);
     if (method != NULL) {
       return method;
@@ -483,8 +483,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& signature) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& signature) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature);
     if (method != NULL) {
       return method;
@@ -493,8 +493,8 @@
   return NULL;
 }
 
-ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
-  for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+  for (Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
     ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
     if (method != NULL) {
       return method;
@@ -503,7 +503,7 @@
   return NULL;
 }
 
-ArtMethod* Class::FindClassInitializer() const {
+ArtMethod* Class::FindClassInitializer() {
   for (size_t i = 0; i < NumDirectMethods(); ++i) {
     ArtMethod* method = GetDirectMethod(i);
     if (method->IsConstructor() && method->IsStatic()) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a692381..cbec476 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -120,7 +120,7 @@
     kStatusMax = 10,
   };
 
-  Status GetStatus() const {
+  Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Status), sizeof(uint32_t));
     return static_cast<Status>(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), true));
   }
@@ -132,107 +132,107 @@
   }
 
   // Returns true if the class has failed to link.
-  bool IsErroneous() const {
+  bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() == kStatusError;
   }
 
   // Returns true if the class has been loaded.
-  bool IsIdxLoaded() const {
+  bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusIdx;
   }
 
   // Returns true if the class has been loaded.
-  bool IsLoaded() const {
+  bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusLoaded;
   }
 
   // Returns true if the class has been linked.
-  bool IsResolved() const {
+  bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusResolved;
   }
 
   // Returns true if the class was compile-time verified.
-  bool IsCompileTimeVerified() const {
+  bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusRetryVerificationAtRuntime;
   }
 
   // Returns true if the class has been verified.
-  bool IsVerified() const {
+  bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusVerified;
   }
 
   // Returns true if the class is initializing.
-  bool IsInitializing() const {
+  bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() >= kStatusInitializing;
   }
 
   // Returns true if the class is initialized.
-  bool IsInitialized() const {
+  bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStatus() == kStatusInitialized;
   }
 
-  uint32_t GetAccessFlags() const;
+  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetAccessFlags(uint32_t new_access_flags) {
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags, false);
   }
 
   // Returns true if the class is an interface.
-  bool IsInterface() const {
+  bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccInterface) != 0;
   }
 
   // Returns true if the class is declared public.
-  bool IsPublic() const {
+  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the class is declared final.
-  bool IsFinal() const {
+  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsFinalizable() const {
+  bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
   }
 
-  void SetFinalizable() {
+  void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
     SetAccessFlags(flags | kAccClassIsFinalizable);
   }
 
   // Returns true if the class is abstract.
-  bool IsAbstract() const {
+  bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
   // Returns true if the class is an annotation.
-  bool IsAnnotation() const {
+  bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAnnotation) != 0;
   }
 
   // Returns true if the class is synthetic.
-  bool IsSynthetic() const {
+  bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  bool IsReferenceClass() const {
+  bool IsReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsReference) != 0;
   }
 
-  bool IsWeakReferenceClass() const {
+  bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsWeakReference) != 0;
   }
 
-  bool IsSoftReferenceClass() const {
+  bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccReferenceFlagsMask) == kAccClassIsReference;
   }
 
-  bool IsFinalizerReferenceClass() const {
+  bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsFinalizerReference) != 0;
   }
 
-  bool IsPhantomReferenceClass() const {
+  bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsPhantomReference) != 0;
   }
 
@@ -241,7 +241,7 @@
   // For array classes, where all the classes are final due to there being no sub-classes, an
   // Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
   // types as the component is final.
-  bool CannotBeAssignedFromOtherTypes() const {
+  bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (!IsArrayClass()) {
       return IsFinal();
     } else {
@@ -254,12 +254,12 @@
     }
   }
 
-  String* GetName() const;  // Returns the cached name.
+  String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);  // Returns the cached name.
   void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);  // Sets the cached name.
   // Computes the name, then sets the cached value.
   String* ComputeName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsProxyClass() const {
+  bool IsProxyClass() {
     // Read access flags without using getter as whether something is a proxy can be check in
     // any loaded state
     // TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -267,91 +267,91 @@
     return (access_flags & kAccClassIsProxy) != 0;
   }
 
-  Primitive::Type GetPrimitiveType() const {
+  Primitive::Type GetPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
     return static_cast<Primitive::Type>(
         GetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false));
   }
 
-  void SetPrimitiveType(Primitive::Type new_type) {
+  void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), new_type, false);
   }
 
   // Returns true if the class is a primitive type.
-  bool IsPrimitive() const {
+  bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() != Primitive::kPrimNot;
   }
 
-  bool IsPrimitiveBoolean() const {
+  bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimBoolean;
   }
 
-  bool IsPrimitiveByte() const {
+  bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimByte;
   }
 
-  bool IsPrimitiveChar() const {
+  bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimChar;
   }
 
-  bool IsPrimitiveShort() const {
+  bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimShort;
   }
 
-  bool IsPrimitiveInt() const {
+  bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimInt;
   }
 
-  bool IsPrimitiveLong() const {
+  bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimLong;
   }
 
-  bool IsPrimitiveFloat() const {
+  bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimFloat;
   }
 
-  bool IsPrimitiveDouble() const {
+  bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimDouble;
   }
 
-  bool IsPrimitiveVoid() const {
+  bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimVoid;
   }
 
-  bool IsPrimitiveArray() const {
+  bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return IsArrayClass() && GetComponentType()->IsPrimitive();
   }
 
   // Depth of class from java.lang.Object
-  size_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    size_t depth = 0;
+  uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uint32_t depth = 0;
     for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) {
       depth++;
     }
     return depth;
   }
 
-  bool IsArrayClass() const {
+  bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetComponentType() != NULL;
   }
 
-  bool IsClassClass() const;
+  bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsStringClass() const;
 
-  bool IsThrowableClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsArtFieldClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArtFieldClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsArtMethodClass() const;
+  bool IsArtMethodClass();
 
   static MemberOffset ComponentTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
   }
 
-  Class* GetComponentType() const {
-    return GetFieldObject<Class*>(ComponentTypeOffset(), false);
+  Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<Class>(ComponentTypeOffset(), false);
   }
 
   void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -360,18 +360,18 @@
     SetFieldObject(ComponentTypeOffset(), new_component_type, false);
   }
 
-  size_t GetComponentSize() const {
+  size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return Primitive::ComponentSize(GetComponentType()->GetPrimitiveType());
   }
 
-  bool IsObjectClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return !IsPrimitive() && GetSuperClass() == NULL;
   }
-  bool IsInstantiable() const {
+  bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || ((IsAbstract()) && IsArrayClass());
   }
 
-  bool IsObjectArrayClass() const {
+  bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetComponentType() != NULL && !GetComponentType()->IsPrimitive();
   }
 
@@ -385,48 +385,44 @@
   Object* AllocNonMovableObject(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsVariableSize() const {
+  bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Classes and arrays vary in size, and so the object_size_ field cannot
     // be used to get their instance size
     return IsClassClass() || IsArrayClass();
   }
 
-  size_t SizeOf() const {
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
+  uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
   }
 
-  size_t GetClassSize() const {
-    DCHECK_EQ(sizeof(size_t), sizeof(uint32_t));
+  uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
   }
 
-  void SetClassSize(size_t new_class_size)
+  void SetClassSize(uint32_t new_class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t GetObjectSize() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetObjectSize(size_t new_object_size) {
+  void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(!IsVariableSize());
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
     return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size, false);
   }
 
   // Returns true if this class is in the same packages as that class.
-  bool IsInSamePackage(const Class* that) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
 
   // Returns true if this class can access that class.
-  bool CanAccess(Class* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return that->IsPublic() || this->IsInSamePackage(that);
   }
 
   // Can this class access a member in the provided class with the provided member access flags?
   // Note that access to the class isn't checked in case the declaring class is protected and the
   // method has been exposed by a public sub-class
-  bool CanAccessMember(Class* access_to, uint32_t member_flags) const
+  bool CanAccessMember(Class* access_to, uint32_t member_flags)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Classes can access all of their own members
     if (this == access_to) {
@@ -454,7 +450,7 @@
   // Note that access to field's class is checked and this may require looking up the class
   // referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedField(Class* access_to, ArtField* field,
-                              const DexCache& dex_cache, uint32_t field_idx)
+                              DexCache* dex_cache, uint32_t field_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
                                 uint32_t field_idx)
@@ -464,22 +460,21 @@
   // Note that access to methods's class is checked and this may require looking up the class
   // referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
-                               const DexCache& dex_cache, uint32_t method_idx)
+                               DexCache* dex_cache, uint32_t method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template <InvokeType throw_invoke_type>
   bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
                                  uint32_t method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsSubClass(const Class* klass) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can src be assigned to this class? For example, String can be assigned to Object (by an
   // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
   // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
   // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
   // to themselves. Classes for primitive types may not assign to each other.
-  inline bool IsAssignableFrom(const Class* src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  inline bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(src != NULL);
     if (this == src) {
       // Can always assign to things of the same type.
@@ -496,18 +491,18 @@
     }
   }
 
-  Class* GetSuperClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // super class is assigned once, except during class linker initialization
-    Class* old_super_class = GetFieldObject<Class*>(
-        OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false);
-    DCHECK(old_super_class == NULL || old_super_class == new_super_class);
-    DCHECK(new_super_class != NULL);
+    // Super class is assigned once, except during class linker initialization.
+    Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_),
+                                                   false);
+    DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
+    DCHECK(new_super_class != nullptr);
     SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class, false);
   }
 
-  bool HasSuperClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetSuperClass() != NULL;
   }
 
@@ -515,7 +510,7 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
-  ClassLoader* GetClassLoader() const;
+  ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -529,46 +524,43 @@
     kDumpClassInitialized = (1 << 2),
   };
 
-  void DumpClass(std::ostream& os, int flags) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  DexCache* GetDexCache() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetDirectMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<ArtMethod>* GetDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* GetDirectMethod(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDirectMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns the number of static, private, and constructor methods.
-  size_t NumDirectMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetVirtualMethods() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<ArtMethod>* GetVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns the number of non-inherited virtual methods.
-  size_t NumVirtualMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* GetVirtualMethod(uint32_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* GetVirtualMethodDuringLinking(uint32_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetVirtualMethodDuringLinking(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetVirtualMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetVTable() const;
+  ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetVTableDuringLinking() const;
+  ObjectArray<ArtMethod>* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetVTable(ObjectArray<ArtMethod>* new_vtable)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -577,7 +569,7 @@
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
   }
 
-  ObjectArray<ArtMethod>* GetImTable() const;
+  ObjectArray<ArtMethod>* GetImTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetImTable(ObjectArray<ArtMethod>* new_imtable)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -588,105 +580,102 @@
 
   // Given a method implemented by this class but potentially from a super class, return the
   // specific implementation method for this class.
-  ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method) const
+  ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Given a method implemented by this class' super class, return the specific implementation
   // method for this class.
-  ArtMethod* FindVirtualMethodForSuper(ArtMethod* method) const
+  ArtMethod* FindVirtualMethodForSuper(ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Given a method implemented by this class, but potentially from a
   // super class or interface, return the specific implementation
   // method for this class.
-  ArtMethod* FindVirtualMethodForInterface(ArtMethod* method) const
+  ArtMethod* FindVirtualMethodForInterface(ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
 
-  ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method) const
+  ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature) const
+  ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+  ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const
+  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) const
+  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+  ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const
+  ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature) const
+  ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+  ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const
+  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature) const
+  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+  ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature) const
+  ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature) const
+  ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+  ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* FindClassInitializer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* FindClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t GetIfTableCount() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  IfTable* GetIfTable() const;
+  IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get instance fields of the class (See also GetSFields).
-  ObjectArray<ArtField>* GetIFields() const;
+  ObjectArray<ArtField>* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetIFields(ObjectArray<ArtField>* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t NumInstanceFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtField* GetInstanceField(uint32_t i) const  // TODO: uint16_t
+  ArtField* GetInstanceField(uint32_t i)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetInstanceField(uint32_t i, ArtField* f)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns the number of instance fields containing reference types.
-  size_t NumReferenceInstanceFields() const {
+  uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false);
   }
 
-  size_t NumReferenceInstanceFieldsDuringLinking() const {
+  uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous());
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false);
   }
 
-  void SetNumReferenceInstanceFields(size_t new_num) {
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
+  void SetNumReferenceInstanceFields(uint32_t new_num) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num, false);
   }
 
-  uint32_t GetReferenceInstanceOffsets() const {
+  uint32_t GetReferenceInstanceOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), false);
   }
@@ -700,39 +689,39 @@
   }
 
   // Returns the number of static fields containing reference types.
-  size_t NumReferenceStaticFields() const {
+  uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false);
   }
 
-  size_t NumReferenceStaticFieldsDuringLinking() const {
+  uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous());
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false);
   }
 
-  void SetNumReferenceStaticFields(size_t new_num) {
-    DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
+  void SetNumReferenceStaticFields(uint32_t new_num) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num, false);
   }
 
   // Gets the static fields of the class.
-  ObjectArray<ArtField>* GetSFields() const;
+  ObjectArray<ArtField>* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetSFields(ObjectArray<ArtField>* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t NumStaticFields() const;
+  uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtField* GetStaticField(uint32_t i) const;  // TODO: uint16_t
+  // TODO: uint16_t
+  ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetStaticField(uint32_t i, ArtField* f);  // TODO: uint16_t
+  // TODO: uint16_t
+  void SetStaticField(uint32_t i, ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t GetReferenceStaticOffsets() const {
+  uint32_t GetReferenceStaticOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), false);
   }
 
-  void SetReferenceStaticOffsets(uint32_t new_reference_offsets);
+  void SetReferenceStaticOffsets(uint32_t new_reference_offsets)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Find a static or instance field using the JLS resolution order
   ArtField* FindField(const StringPiece& name, const StringPiece& type)
@@ -768,33 +757,33 @@
   ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  pid_t GetClinitThreadId() const {
+  pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(IsIdxLoaded() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), false);
   }
 
-  void SetClinitThreadId(pid_t new_clinit_thread_id) {
+  void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id, false);
   }
 
-  Class* GetVerifyErrorClass() const {
+  Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // DCHECK(IsErroneous());
-    return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), false);
+    return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), false);
   }
 
-  uint16_t GetDexClassDefIndex() const {
+  uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), false);
   }
 
-  void SetDexClassDefIndex(uint16_t class_def_idx) {
+  void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx, false);
   }
 
-  uint16_t GetDexTypeIndex() const {
+  uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), false);
   }
 
-  void SetDexTypeIndex(uint16_t type_idx) {
+  void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx, false);
   }
 
@@ -817,35 +806,32 @@
 
   template <bool throw_on_failure, bool use_referrers_cache>
   bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
-                               uint32_t field_idx, const DexCache* dex_cache)
+                               uint32_t field_idx, DexCache* dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
   bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
-                                uint32_t method_idx, const DexCache* dex_cache)
+                                uint32_t method_idx, DexCache* dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool Implements(const Class* klass) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsArrayAssignableFromArray(const Class* klass) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsAssignableFromArray(const Class* klass) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // defining class loader, or NULL for the "bootstrap" system loader
-  ClassLoader* class_loader_;
+  HeapReference<ClassLoader> class_loader_;
 
   // For array classes, the component class object for instanceof/checkcast
   // (for String[][][], this will be String[][]). NULL for non-array classes.
-  Class* component_type_;
+  HeapReference<Class> component_type_;
 
   // DexCache of resolved constant pool entries (will be NULL for classes generated by the
   // runtime such as arrays and primitive classes).
-  DexCache* dex_cache_;
+  HeapReference<DexCache> dex_cache_;
 
   // static, private, and <init> methods
-  ObjectArray<ArtMethod>* direct_methods_;
+  HeapReference<ObjectArray<ArtMethod> > direct_methods_;
 
   // instance fields
   //
@@ -857,7 +843,7 @@
   // All instance fields that refer to objects are guaranteed to be at
   // the beginning of the field list.  num_reference_instance_fields_
   // specifies the number of reference fields.
-  ObjectArray<ArtField>* ifields_;
+  HeapReference<ObjectArray<ArtField> > ifields_;
 
   // The interface table (iftable_) contains pairs of a interface class and an array of the
   // interface methods. There is one pair per interface supported by this class.  That means one
@@ -870,38 +856,38 @@
   //
   // For every interface a concrete class implements, we create an array of the concrete vtable_
   // methods for the methods in the interface.
-  IfTable* iftable_;
+  HeapReference<IfTable> iftable_;
 
   // Interface method table (imt), for quick "invoke-interface".
-  ObjectArray<ArtMethod>* imtable_;
+  HeapReference<ObjectArray<ArtMethod> > imtable_;
 
-  // descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
-  String* name_;
+  // Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName().
+  HeapReference<String> name_;
 
   // Static fields
-  ObjectArray<ArtField>* sfields_;
+  HeapReference<ObjectArray<ArtField> > sfields_;
 
   // The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
-  Class* super_class_;
+  HeapReference<Class> super_class_;
 
   // If class verify fails, we must return same error on subsequent tries.
-  Class* verify_error_class_;
+  HeapReference<Class> verify_error_class_;
 
   // Virtual methods defined in this class; invoked through vtable.
-  ObjectArray<ArtMethod>* virtual_methods_;
+  HeapReference<ObjectArray<ArtMethod> > virtual_methods_;
 
   // Virtual method table (vtable), for use by "invoke-virtual".  The vtable from the superclass is
   // copied in, and virtual methods from our class either replace those from the super or are
   // appended. For abstract classes, methods may be created in the vtable that aren't in
   // virtual_ methods_ for miranda methods.
-  ObjectArray<ArtMethod>* vtable_;
+  HeapReference<ObjectArray<ArtMethod> > vtable_;
 
   // Access flags; low 16 bits are defined by VM spec.
   uint32_t access_flags_;
 
   // Total size of the Class instance; used when allocating storage on gc heap.
   // See also object_size_.
-  size_t class_size_;
+  uint32_t class_size_;
 
   // Tid used to check for recursive <clinit> invocation.
   pid_t clinit_thread_id_;
@@ -915,15 +901,15 @@
   int32_t dex_type_idx_;
 
   // Number of instance fields that are object refs.
-  size_t num_reference_instance_fields_;
+  uint32_t num_reference_instance_fields_;
 
   // Number of static fields that are object refs,
-  size_t num_reference_static_fields_;
+  uint32_t num_reference_static_fields_;
 
   // Total object size; used when allocating storage on gc heap.
   // (For interfaces and abstract classes this will be zero.)
   // See also class_size_.
-  size_t object_size_;
+  uint32_t object_size_;
 
   // Primitive type value, or Primitive::kPrimNot (0); set for generated primitive classes.
   Primitive::Type primitive_type_;
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 415cb67..69accf5 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -32,9 +32,9 @@
 class MANAGED ClassLoader : public Object {
  private:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
-  Object* packages_;
-  ClassLoader* parent_;
-  Object* proxyCache_;
+  HeapReference<Object> packages_;
+  HeapReference<ClassLoader> parent_;
+  HeapReference<Object> proxyCache_;
 
   friend struct art::ClassLoaderOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index da26be5..f59c3a2 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -22,7 +22,7 @@
 namespace art {
 namespace mirror {
 
-inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx) const
+inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ArtMethod* method = GetResolvedMethods()->Get(method_idx);
   // Hide resolution trampoline methods from the caller
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index a5fe598..99529f0 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -52,8 +52,8 @@
 
   void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  String* GetLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldObject<String*>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), false);
+  String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), false);
   }
 
   static MemberOffset StringsOffset() {
@@ -68,24 +68,23 @@
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
   }
 
-  size_t NumStrings() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStrings()->GetLength();
   }
 
-  size_t NumResolvedTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetResolvedTypes()->GetLength();
   }
 
-  size_t NumResolvedMethods() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetResolvedMethods()->GetLength();
   }
 
-  size_t NumResolvedFields() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetResolvedFields()->GetLength();
   }
 
-  String* GetResolvedString(uint32_t string_idx) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetStrings()->Get(string_idx);
   }
 
@@ -94,8 +93,7 @@
     GetStrings()->Set(string_idx, resolved);
   }
 
-  Class* GetResolvedType(uint32_t type_idx) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Class* GetResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetResolvedTypes()->Get(type_idx);
   }
 
@@ -104,16 +102,14 @@
     GetResolvedTypes()->Set(type_idx, resolved);
   }
 
-  ArtMethod* GetResolvedMethod(uint32_t method_idx) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     GetResolvedMethods()->Set(method_idx, resolved);
   }
 
-  ArtField* GetResolvedField(uint32_t field_idx) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtField* GetResolvedField(uint32_t field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetResolvedFields()->Get(field_idx);
   }
 
@@ -122,28 +118,24 @@
     GetResolvedFields()->Set(field_idx, resolved);
   }
 
-  ObjectArray<String>* GetStrings() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldObject< ObjectArray<String>* >(StringsOffset(), false);
+  ObjectArray<String>* GetStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject< ObjectArray<String> >(StringsOffset(), false);
   }
 
-  ObjectArray<Class>* GetResolvedTypes() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldObject< ObjectArray<Class>* >(
+  ObjectArray<Class>* GetResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<ObjectArray<Class> >(
         OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), false);
   }
 
-  ObjectArray<ArtMethod>* GetResolvedMethods() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldObject< ObjectArray<ArtMethod>* >(ResolvedMethodsOffset(), false);
+  ObjectArray<ArtMethod>* GetResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject< ObjectArray<ArtMethod> >(ResolvedMethodsOffset(), false);
   }
 
-  ObjectArray<ArtField>* GetResolvedFields() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldObject< ObjectArray<ArtField>* >(ResolvedFieldsOffset(), false);
+  ObjectArray<ArtField>* GetResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<ObjectArray<ArtField> >(ResolvedFieldsOffset(), false);
   }
 
-  const DexFile* GetDexFile() const {
+  const DexFile* GetDexFile() {
     return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), false);
   }
 
@@ -152,13 +144,13 @@
   }
 
  private:
-  Object* dex_;
-  String* location_;
-  ObjectArray<ArtField>* resolved_fields_;
-  ObjectArray<ArtMethod>* resolved_methods_;
-  ObjectArray<Class>* resolved_types_;
-  ObjectArray<String>* strings_;
-  uint32_t dex_file_;
+  HeapReference<Object> dex_;
+  HeapReference<String> location_;
+  HeapReference<ObjectArray<ArtField> > resolved_fields_;
+  HeapReference<ObjectArray<ArtMethod> > resolved_methods_;
+  HeapReference<ObjectArray<Class> > resolved_types_;
+  HeapReference<ObjectArray<String> > strings_;
+  uint64_t dex_file_;
 
   friend struct art::DexCacheOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 421893d..be83d03 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -24,7 +24,7 @@
 
 class MANAGED IfTable : public ObjectArray<Object> {
  public:
-  Class* GetInterface(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Class* interface = Get((i * kMax) + kInterface)->AsClass();
     DCHECK(interface != NULL);
     return interface;
@@ -32,15 +32,14 @@
 
   void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetMethodArray(int32_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ObjectArray<ArtMethod>* method_array =
         down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
     DCHECK(method_array != NULL);
     return method_array;
   }
 
-  size_t GetMethodArrayCount(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ObjectArray<ArtMethod>* method_array =
         down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
     if (method_array == NULL) {
@@ -56,7 +55,7 @@
     Set((i * kMax) + kMethodArray, new_ma);
   }
 
-  size_t Count() const {
+  size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetLength() / kMax;
   }
 
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 9161bc5..afa4112 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -32,19 +32,18 @@
 namespace art {
 namespace mirror {
 
-inline Class* Object::GetClass() const {
-  return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+inline Class* Object::GetClass() {
+  return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
 }
 
 inline void Object::SetClass(Class* new_klass) {
-  // new_klass may be NULL prior to class linker initialization
-  // We don't mark the card since the class is guaranteed to be referenced from another location.
-  // Proxy classes are held live by the class loader, and other classes are roots of the class
-  // linker.
-  SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false);
+  // new_klass may be NULL prior to class linker initialization.
+  // We don't mark the card as this occurs as part of object allocation. Not all objects have
+  // backing cards, such as large objects.
+  SetFieldObjectWithoutWriteBarrier(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false);
 }
 
-inline LockWord Object::GetLockWord() const {
+inline LockWord Object::GetLockWord() {
   return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), true));
 }
 
@@ -85,19 +84,19 @@
   Monitor::Wait(self, this, ms, ns, true, kTimedWaiting);
 }
 
-inline bool Object::VerifierInstanceOf(const Class* klass) const {
+inline bool Object::VerifierInstanceOf(Class* klass) {
   DCHECK(klass != NULL);
   DCHECK(GetClass() != NULL);
   return klass->IsInterface() || InstanceOf(klass);
 }
 
-inline bool Object::InstanceOf(const Class* klass) const {
+inline bool Object::InstanceOf(Class* klass) {
   DCHECK(klass != NULL);
   DCHECK(GetClass() != NULL);
   return klass->IsAssignableFrom(GetClass());
 }
 
-inline bool Object::IsClass() const {
+inline bool Object::IsClass() {
   Class* java_lang_Class = GetClass()->GetClass();
   return GetClass() == java_lang_Class;
 }
@@ -107,12 +106,7 @@
   return down_cast<Class*>(this);
 }
 
-inline const Class* Object::AsClass() const {
-  DCHECK(IsClass());
-  return down_cast<const Class*>(this);
-}
-
-inline bool Object::IsObjectArray() const {
+inline bool Object::IsObjectArray() {
   return IsArrayInstance() && !GetClass()->GetComponentType()->IsPrimitive();
 }
 
@@ -122,17 +116,11 @@
   return down_cast<ObjectArray<T>*>(this);
 }
 
-template<class T>
-inline const ObjectArray<T>* Object::AsObjectArray() const {
-  DCHECK(IsObjectArray());
-  return down_cast<const ObjectArray<T>*>(this);
-}
-
-inline bool Object::IsArrayInstance() const {
+inline bool Object::IsArrayInstance() {
   return GetClass()->IsArrayClass();
 }
 
-inline bool Object::IsArtField() const {
+inline bool Object::IsArtField() {
   return GetClass()->IsArtFieldClass();
 }
 
@@ -141,12 +129,7 @@
   return down_cast<ArtField*>(this);
 }
 
-inline const ArtField* Object::AsArtField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(IsArtField());
-  return down_cast<const ArtField*>(this);
-}
-
-inline bool Object::IsArtMethod() const {
+inline bool Object::IsArtMethod() {
   return GetClass()->IsArtMethodClass();
 }
 
@@ -155,12 +138,7 @@
   return down_cast<ArtMethod*>(this);
 }
 
-inline const ArtMethod* Object::AsArtMethod() const {
-  DCHECK(IsArtMethod());
-  return down_cast<const ArtMethod*>(this);
-}
-
-inline bool Object::IsReferenceInstance() const {
+inline bool Object::IsReferenceInstance() {
   return GetClass()->IsReferenceClass();
 }
 
@@ -169,11 +147,6 @@
   return down_cast<Array*>(this);
 }
 
-inline const Array* Object::AsArray() const {
-  DCHECK(IsArrayInstance());
-  return down_cast<const Array*>(this);
-}
-
 inline BooleanArray* Object::AsBooleanArray() {
   DCHECK(GetClass()->IsArrayClass());
   DCHECK(GetClass()->GetComponentType()->IsPrimitiveBoolean());
@@ -186,6 +159,13 @@
   return down_cast<ByteArray*>(this);
 }
 
+inline ByteArray* Object::AsByteSizedArray() {
+  DCHECK(GetClass()->IsArrayClass());
+  DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte() ||
+         GetClass()->GetComponentType()->IsPrimitiveBoolean());
+  return down_cast<ByteArray*>(this);
+}
+
 inline CharArray* Object::AsCharArray() {
   DCHECK(GetClass()->IsArrayClass());
   DCHECK(GetClass()->GetComponentType()->IsPrimitiveChar());
@@ -198,6 +178,13 @@
   return down_cast<ShortArray*>(this);
 }
 
+inline ShortArray* Object::AsShortSizedArray() {
+  DCHECK(GetClass()->IsArrayClass());
+  DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort() ||
+         GetClass()->GetComponentType()->IsPrimitiveChar());
+  return down_cast<ShortArray*>(this);
+}
+
 inline IntArray* Object::AsIntArray() {
   DCHECK(GetClass()->IsArrayClass());
   DCHECK(GetClass()->GetComponentType()->IsPrimitiveInt() ||
@@ -222,23 +209,23 @@
   return down_cast<Throwable*>(this);
 }
 
-inline bool Object::IsWeakReferenceInstance() const {
+inline bool Object::IsWeakReferenceInstance() {
   return GetClass()->IsWeakReferenceClass();
 }
 
-inline bool Object::IsSoftReferenceInstance() const {
+inline bool Object::IsSoftReferenceInstance() {
   return GetClass()->IsSoftReferenceClass();
 }
 
-inline bool Object::IsFinalizerReferenceInstance() const {
+inline bool Object::IsFinalizerReferenceInstance() {
   return GetClass()->IsFinalizerReferenceClass();
 }
 
-inline bool Object::IsPhantomReferenceInstance() const {
+inline bool Object::IsPhantomReferenceInstance() {
   return GetClass()->IsPhantomReferenceClass();
 }
 
-inline size_t Object::SizeOf() const {
+inline size_t Object::SizeOf() {
   size_t result;
   if (IsArrayInstance()) {
     result = AsArray()->SizeOf();
@@ -253,13 +240,13 @@
   return result;
 }
 
-inline uint32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) const {
+inline uint32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) {
   VerifyObject(this);
   const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
   const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
   if (UNLIKELY(is_volatile)) {
     int32_t result = *(reinterpret_cast<volatile int32_t*>(const_cast<int32_t*>(word_addr)));
-    QuasiAtomic::MembarLoadLoad();
+    QuasiAtomic::MembarLoadLoad();  // Ensure volatile loads don't re-order.
     return result;
   } else {
     return *word_addr;
@@ -276,7 +263,7 @@
   if (UNLIKELY(is_volatile)) {
     QuasiAtomic::MembarStoreStore();  // Ensure this store occurs after others in the queue.
     *word_addr = new_value;
-    QuasiAtomic::MembarStoreLoad();  // Ensure this store occurs before any loads.
+    QuasiAtomic::MembarStoreLoad();  // Ensure this store occurs before any volatile loads.
   } else {
     *word_addr = new_value;
   }
@@ -289,28 +276,31 @@
   return __sync_bool_compare_and_swap(addr, old_value, new_value);
 }
 
-inline uint64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) const {
+inline uint64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) {
   VerifyObject(this);
   const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
   const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
   if (UNLIKELY(is_volatile)) {
     uint64_t result = QuasiAtomic::Read64(addr);
-    QuasiAtomic::MembarLoadLoad();
+    QuasiAtomic::MembarLoadLoad();  // Ensure volatile loads don't re-order.
     return result;
   } else {
     return *addr;
   }
 }
 
-inline void Object::SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile) {
-  VerifyObject(this);
+inline void Object::SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile,
+                               bool this_is_valid) {
+  if (this_is_valid) {
+    VerifyObject(this);
+  }
   byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
   int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
   if (UNLIKELY(is_volatile)) {
     QuasiAtomic::MembarStoreStore();  // Ensure this store occurs after others in the queue.
     QuasiAtomic::Write64(addr, new_value);
     if (!QuasiAtomic::LongAtomicsUseMutexes()) {
-      QuasiAtomic::MembarStoreLoad();  // Ensure this store occurs before any loads.
+      QuasiAtomic::MembarStoreLoad();  // Ensure this store occurs before any volatile loads.
     } else {
       // Fence from from mutex is enough.
     }
@@ -319,12 +309,69 @@
   }
 }
 
-inline void Object::WriteBarrierField(const Object* dst, MemberOffset field_offset,
-                                      const Object* new_value) {
-  Runtime::Current()->GetHeap()->WriteBarrierField(dst, field_offset, new_value);
+inline bool Object::CasField64(MemberOffset field_offset, uint64_t old_value, uint64_t new_value) {
+  VerifyObject(this);
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  volatile uint64_t* addr = reinterpret_cast<volatile uint64_t*>(raw_addr);
+  return __sync_bool_compare_and_swap(addr, old_value, new_value);
 }
 
-inline void Object::VerifyObject(const Object* obj) {
+template<class T>
+inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
+  VerifyObject(this);
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
+  HeapReference<T> objref = *objref_addr;
+
+  if (UNLIKELY(is_volatile)) {
+    QuasiAtomic::MembarLoadLoad();  // Ensure loads don't re-order.
+  }
+  T* result = objref.AsMirrorPtr();
+  VerifyObject(result);
+  return result;
+}
+
+inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
+                                                      bool is_volatile, bool this_is_valid) {
+  if (this_is_valid) {
+    VerifyObject(this);
+  }
+  VerifyObject(new_value);
+  HeapReference<Object> objref(HeapReference<Object>::FromMirrorPtr(new_value));
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
+  if (UNLIKELY(is_volatile)) {
+    QuasiAtomic::MembarStoreStore();  // Ensure this store occurs after others in the queue.
+    objref_addr->Assign(new_value);
+    QuasiAtomic::MembarStoreLoad();  // Ensure this store occurs before any volatile loads.
+  } else {
+    objref_addr->Assign(new_value);
+  }
+}
+
+inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile,
+                                   bool this_is_valid) {
+  SetFieldObjectWithoutWriteBarrier(field_offset, new_value, is_volatile, this_is_valid);
+  if (new_value != nullptr) {
+    CheckFieldAssignment(field_offset, new_value);
+    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+  }
+}
+
+inline bool Object::CasFieldObject(MemberOffset field_offset, Object* old_value, Object* new_value) {
+  VerifyObject(this);
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  volatile uint32_t* addr = reinterpret_cast<volatile uint32_t*>(raw_addr);
+  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  bool success = __sync_bool_compare_and_swap(addr, old_ref.reference_, new_ref.reference_);
+  if (success) {
+    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+  }
+  return success;
+}
+
+inline void Object::VerifyObject(Object* obj) {
   if (kIsDebugBuild) {
     Runtime::Current()->GetHeap()->VerifyObject(obj);
   }
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index bdb3250..1251852 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -52,7 +52,7 @@
   Class* c = src->GetClass();
   if (c->IsArrayClass()) {
     if (!c->GetComponentType()->IsPrimitive()) {
-      const ObjectArray<Object>* array = dest->AsObjectArray<Object>();
+      ObjectArray<Object>* array = dest->AsObjectArray<Object>();
       heap->WriteBarrierArray(dest, 0, array->GetLength());
     }
   } else {
@@ -139,14 +139,15 @@
   return 0;
 }
 
-void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value) {
-  const Class* c = GetClass();
+void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) {
+  Class* c = GetClass();
   if (Runtime::Current()->GetClassLinker() == NULL ||
+      !Runtime::Current()->IsStarted() ||
       !Runtime::Current()->GetHeap()->IsObjectValidationEnabled() ||
       !c->IsResolved()) {
     return;
   }
-  for (const Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
+  for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
     ObjectArray<ArtField>* fields = cur->GetIFields();
     if (fields != NULL) {
       size_t num_ref_ifields = cur->NumReferenceInstanceFields();
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 058aee7..6fe8b73 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -21,6 +21,7 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "cutils/atomic-inline.h"
+#include "object_reference.h"
 #include "offsets.h"
 
 namespace art {
@@ -51,17 +52,13 @@
 class String;
 class Throwable;
 
-// Classes shared with the managed side of the world need to be packed so that they don't have
-// extra platform specific padding.
-#define MANAGED PACKED(4)
-
 // Fields within mirror objects aren't accessed directly so that the appropriate amount of
 // handshaking is done with GC (for example, read and write barriers). This macro is used to
 // compute an offset for the Set/Get methods defined in Object that can safely access fields.
 #define OFFSET_OF_OBJECT_MEMBER(type, field) \
     MemberOffset(OFFSETOF_MEMBER(type, field))
 
-const bool kCheckFieldAssignments = false;
+constexpr bool kCheckFieldAssignments = false;
 
 // C++ mirror of java.lang.Object
 class MANAGED Object {
@@ -70,19 +67,17 @@
     return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
   }
 
-  Class* GetClass() const;
+  Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetClass(Class* new_klass);
+  void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
-  bool VerifierInstanceOf(const Class* klass) const
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool InstanceOf(const Class* klass) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -92,9 +87,9 @@
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
   }
 
-  LockWord GetLockWord() const;
+  LockWord GetLockWord();
   void SetLockWord(LockWord new_val);
-  bool CasLockWord(LockWord old_val, LockWord new_val);
+  bool CasLockWord(LockWord old_val, LockWord new_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   uint32_t GetLockOwnerThreadId();
 
   void MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -111,111 +106,113 @@
 
   void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsClass() const;
+  bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  Class* AsClass();
+  Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const Class* AsClass() const;
-
-  bool IsObjectArray() const;
+  bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<class T>
-  ObjectArray<T>* AsObjectArray();
+  ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  template<class T>
-  const ObjectArray<T>* AsObjectArray() const;
+  bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsArrayInstance() const;
+  Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  Array* AsArray();
+  BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const Array* AsArray() const;
+  CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  BooleanArray* AsBooleanArray();
-  ByteArray* AsByteArray();
-  CharArray* AsCharArray();
-  ShortArray* AsShortArray();
-  IntArray* AsIntArray();
-  LongArray* AsLongArray();
+  IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  String* AsString();
+  String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsArtMethod() const;
+  bool IsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* AsArtMethod();
+  ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const ArtMethod* AsArtMethod() const;
-
-  bool IsArtField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ArtField* AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const ArtField* AsArtField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsReferenceInstance() const;
+  bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsWeakReferenceInstance() const;
+  bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsSoftReferenceInstance() const;
+  bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsFinalizerReferenceInstance() const;
+  bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsPhantomReferenceInstance() const;
+  // Accessor for Java type fields.
+  template<class T> T* GetFieldObject(MemberOffset field_offset, bool is_volatile)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
+                                         bool is_volatile, bool this_is_valid = true)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile,
+                      bool this_is_valid = true)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CasFieldObject(MemberOffset field_offset, Object* old_value, Object* new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Accessors for Java type fields
-  template<class T>
-  T GetFieldObject(MemberOffset field_offset, bool is_volatile) const {
-    T result = reinterpret_cast<T>(GetField32(field_offset, is_volatile));
-    VerifyObject(result);
-    return result;
-  }
-
-  void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile,
-                      bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    VerifyObject(new_value);
-    SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
-    if (new_value != NULL) {
-      CheckFieldAssignment(field_offset, new_value);
-      WriteBarrierField(this, field_offset, new_value);
-    }
-  }
-
-  Object** GetFieldObjectAddr(MemberOffset field_offset) ALWAYS_INLINE {
+  HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset) ALWAYS_INLINE {
     VerifyObject(this);
-    return reinterpret_cast<Object**>(reinterpret_cast<byte*>(this) + field_offset.Int32Value());
+    return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
+        field_offset.Int32Value());
   }
 
-  uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const;
+  uint32_t GetField32(MemberOffset field_offset, bool is_volatile);
 
   void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile,
                   bool this_is_valid = true);
 
-  bool CasField32(MemberOffset field_offset, uint32_t old_value, uint32_t new_value);
+  bool CasField32(MemberOffset field_offset, uint32_t old_value, uint32_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const;
+  uint64_t GetField64(MemberOffset field_offset, bool is_volatile);
 
-  void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile);
+  void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile,
+                  bool this_is_valid = true);
+
+  bool CasField64(MemberOffset field_offset, uint64_t old_value, uint64_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<typename T>
-  void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) {
+  void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile,
+                   bool this_is_valid = true) {
+#ifndef __LP64__
     SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
+#else
+    SetField64(field_offset, reinterpret_cast<uint64_t>(new_value), is_volatile, this_is_valid);
+#endif
   }
 
  protected:
   // Accessors for non-Java type fields
   template<class T>
-  T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const {
+  T GetFieldPtr(MemberOffset field_offset, bool is_volatile) {
+#ifndef __LP64__
     return reinterpret_cast<T>(GetField32(field_offset, is_volatile));
+#else
+    return reinterpret_cast<T>(GetField64(field_offset, is_volatile));
+#endif
   }
 
  private:
-  static void VerifyObject(const Object* obj) ALWAYS_INLINE;
+  static void VerifyObject(Object* obj) ALWAYS_INLINE;
   // Verify the type correctness of stores to fields.
-  void CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value)
+  void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value)
+  void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (kCheckFieldAssignments) {
       CheckFieldAssignmentImpl(field_offset, new_value);
@@ -225,11 +222,9 @@
   // Generate an identity hash code.
   static int32_t GenerateIdentityHashCode();
 
-  // Write barrier called post update to a reference bearing field.
-  static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
-
-  Class* klass_;
-
+  // The Class representing the type of the object.
+  HeapReference<Class> klass_;
+  // Monitor and hash code information.
   uint32_t monitor_;
 
   friend class art::ImageWriter;
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 6a50dfe..c342479 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -25,6 +25,7 @@
 #include "runtime.h"
 #include "sirt_ref.h"
 #include "thread.h"
+#include <string>
 
 namespace art {
 namespace mirror {
@@ -32,8 +33,8 @@
 template<class T>
 inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
                                              int32_t length, gc::AllocatorType allocator_type) {
-  Array* array = Array::Alloc<true>(self, object_array_class, length, sizeof(Object*),
-                                    allocator_type);
+  Array* array = Array::Alloc<true>(self, object_array_class, length,
+                                    sizeof(HeapReference<Object>), allocator_type);
   if (UNLIKELY(array == nullptr)) {
     return nullptr;
   } else {
@@ -49,12 +50,12 @@
 }
 
 template<class T>
-inline T* ObjectArray<T>::Get(int32_t i) const {
+inline T* ObjectArray<T>::Get(int32_t i) {
   if (UNLIKELY(!CheckIsValidIndex(i))) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return NULL;
   }
-  return GetWithoutChecks(i);
+  return GetFieldObject<T>(OffsetOfElement(i), false);
 }
 
 template<class T>
@@ -72,7 +73,7 @@
 template<class T>
 inline void ObjectArray<T>::Set(int32_t i, T* object) {
   if (LIKELY(CheckIsValidIndex(i) && CheckAssignable(object))) {
-    SetWithoutChecks(i, object);
+    SetFieldObject(OffsetOfElement(i), object, false);
   } else {
     DCHECK(Thread::Current()->IsExceptionPending());
   }
@@ -82,72 +83,123 @@
 inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
   DCHECK(CheckIsValidIndex(i));
   DCHECK(CheckAssignable(object));
-  MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*));
-  SetFieldObject(data_offset, object, false);
+  SetFieldObject(OffsetOfElement(i), object, false);
 }
 
 template<class T>
-inline void ObjectArray<T>::SetPtrWithoutChecks(int32_t i, T* object) {
+inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, T* object) {
   DCHECK(CheckIsValidIndex(i));
-  // TODO enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
+  // TODO:  enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
   // DCHECK(CheckAssignable(object));
-  MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*));
-  SetFieldPtr(data_offset, object, false);
+  SetFieldObjectWithoutWriteBarrier(OffsetOfElement(i), object, false);
 }
 
 template<class T>
-inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) const {
+inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) {
   DCHECK(CheckIsValidIndex(i));
-  MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*));
-  return GetFieldObject<T*>(data_offset, false);
+  return GetFieldObject<T>(OffsetOfElement(i), false);
 }
 
 template<class T>
-inline void ObjectArray<T>::Copy(const ObjectArray<T>* src, int src_pos,
-                                 ObjectArray<T>* dst, int dst_pos,
-                                 size_t length) {
-  if (src->CheckIsValidIndex(src_pos) &&
-      src->CheckIsValidIndex(src_pos + length - 1) &&
-      dst->CheckIsValidIndex(dst_pos) &&
-      dst->CheckIsValidIndex(dst_pos + length - 1)) {
-    MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*));
-    MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*));
-    Class* array_class = dst->GetClass();
-    gc::Heap* heap = Runtime::Current()->GetHeap();
-    if (array_class == src->GetClass()) {
-      // No need for array store checks if arrays are of the same type
-      for (size_t i = 0; i < length; i++) {
-        Object* object = src->GetFieldObject<Object*>(src_offset, false);
-        heap->VerifyObject(object);
-        // directly set field, we do a bulk write barrier at the end
-        dst->SetField32(dst_offset, reinterpret_cast<uint32_t>(object), false, true);
-        src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*));
-        dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*));
-      }
+inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src,
+                                              int32_t src_pos, int32_t count) {
+  if (kIsDebugBuild) {
+    for (int i = 0; i < count; ++i) {
+      // The Get will perform the VerifyObject.
+      src->GetWithoutChecks(src_pos + i);
+    }
+  }
+  // Perform the memmove using int memmove then perform the write barrier.
+  CHECK_EQ(sizeof(HeapReference<T>), sizeof(uint32_t));
+  IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
+  IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
+  dstAsIntArray->Memmove(dst_pos, srcAsIntArray, src_pos, count);
+  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  if (kIsDebugBuild) {
+    for (int i = 0; i < count; ++i) {
+      // The Get will perform the VerifyObject.
+      GetWithoutChecks(dst_pos + i);
+    }
+  }
+}
+
+template<class T>
+inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src,
+                                             int32_t src_pos, int32_t count) {
+  if (kIsDebugBuild) {
+    for (int i = 0; i < count; ++i) {
+      // The Get will perform the VerifyObject.
+      src->GetWithoutChecks(src_pos + i);
+    }
+  }
+  // Perform the memmove using int memcpy then perform the write barrier.
+  CHECK_EQ(sizeof(HeapReference<T>), sizeof(uint32_t));
+  IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
+  IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
+  dstAsIntArray->Memcpy(dst_pos, srcAsIntArray, src_pos, count);
+  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  if (kIsDebugBuild) {
+    for (int i = 0; i < count; ++i) {
+      // The Get will perform the VerifyObject.
+      GetWithoutChecks(dst_pos + i);
+    }
+  }
+}
+
+template<class T>
+inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src,
+                                                     int32_t src_pos, int32_t count,
+                                                     bool throw_exception) {
+  DCHECK_NE(this, src)
+      << "This case should be handled with memmove that handles overlaps correctly";
+  // We want to avoid redundant IsAssignableFrom checks where possible, so we cache a class that
+  // we know is assignable to the destination array's component type.
+  Class* dst_class = GetClass()->GetComponentType();
+  Class* lastAssignableElementClass = dst_class;
+
+  Object* o = nullptr;
+  int i = 0;
+  for (; i < count; ++i) {
+    // The follow get operations force the objects to be verified.
+    o = src->GetWithoutChecks(src_pos + i);
+    if (o == nullptr) {
+      // Null is always assignable.
+      SetWithoutChecks(dst_pos + i, nullptr);
     } else {
-      Class* element_class = array_class->GetComponentType();
-      CHECK(!element_class->IsPrimitive());
-      for (size_t i = 0; i < length; i++) {
-        Object* object = src->GetFieldObject<Object*>(src_offset, false);
-        if (object != NULL && !object->InstanceOf(element_class)) {
-          dst->ThrowArrayStoreException(object);
-          return;
-        }
-        heap->VerifyObject(object);
-        // directly set field, we do a bulk write barrier at the end
-        dst->SetField32(dst_offset, reinterpret_cast<uint32_t>(object), false, true);
-        src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*));
-        dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*));
+      // TODO: use the underlying class reference to avoid uncompression when not necessary.
+      Class* o_class = o->GetClass();
+      if (LIKELY(lastAssignableElementClass == o_class)) {
+        SetWithoutChecks(dst_pos + i, o);
+      } else if (LIKELY(dst_class->IsAssignableFrom(o_class))) {
+        lastAssignableElementClass = o_class;
+        SetWithoutChecks(dst_pos + i, o);
+      } else {
+        // Can't put this element into the array, break to perform write-barrier and throw
+        // exception.
+        break;
       }
     }
-    heap->WriteBarrierArray(dst, dst_pos, length);
-  } else {
-    DCHECK(Thread::Current()->IsExceptionPending());
+  }
+  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  if (UNLIKELY(i != count)) {
+    std::string actualSrcType(PrettyTypeOf(o));
+    std::string dstType(PrettyTypeOf(this));
+    Thread* self = Thread::Current();
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    if (throw_exception) {
+      self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+                               "source[%d] of type %s cannot be stored in destination array of type %s",
+                               src_pos + i, actualSrcType.c_str(), dstType.c_str());
+    } else {
+      LOG(FATAL) << StringPrintf("source[%d] of type %s cannot be stored in destination array of type %s",
+                                 src_pos + i, actualSrcType.c_str(), dstType.c_str());
+    }
   }
 }
 
 template<class T>
 inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
+  DCHECK_GE(new_length, 0);
   // We may get copied by a compacting GC.
   SirtRef<ObjectArray<T> > sirt_this(self, this);
   gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -155,11 +207,17 @@
       heap->GetCurrentNonMovingAllocator();
   ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length, allocator_type);
   if (LIKELY(new_array != nullptr)) {
-    Copy(sirt_this.get(), 0, new_array, 0, std::min(sirt_this->GetLength(), new_length));
+    new_array->AssignableMemcpy(0, sirt_this.get(), 0, std::min(sirt_this->GetLength(), new_length));
   }
   return new_array;
 }
 
+template<class T>
+inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
+  return MemberOffset(DataOffset(sizeof(HeapReference<Object>)).Int32Value() +
+                      (i * sizeof(HeapReference<Object>)));
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 5da8845..347494e 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -33,7 +33,7 @@
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  T* Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  T* Get(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the object can be stored into the array. If not, throws
   // an ArrayStoreException and returns false.
@@ -44,22 +44,30 @@
   // Set element without bound and element type checks, to be used in limited
   // circumstances, such as during boot image writing
   void SetWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Set element without bound and element type checks, to be used in limited circumstances, such
-  // as during boot image writing. Does not do write barrier.
-  void SetPtrWithoutChecks(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  T* GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
+  void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
+                         int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static void Copy(const ObjectArray<T>* src, int src_pos,
-                   ObjectArray<T>* dst, int dst_pos,
-                   size_t length)
+  // Copy src into this array assuming no overlap and without assignability checks.
+  void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
+                        int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Copy src into this array with assignability checks.
+  void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
+                                int32_t count, bool throw_exception)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  static MemberOffset OffsetOfElement(int32_t i);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray);
 };
 
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
new file mode 100644
index 0000000..b30890f
--- /dev/null
+++ b/runtime/mirror/object_reference.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
+#define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
+
+#include "locks.h"
+
+namespace art {
+namespace mirror {
+
+class Object;
+
+// Classes shared with the managed side of the world need to be packed so that they don't have
+// extra platform specific padding.
+#define MANAGED PACKED(4)
+
+// Value type representing a reference to a mirror::Object of type MirrorType.
+template<bool kPoisonReferences, class MirrorType>
+class MANAGED ObjectReference {
+ public:
+  MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return UnCompress();
+  }
+
+  void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    reference_ = Compress(other);
+  }
+
+  void Clear() {
+    reference_ = 0;
+  }
+
+  uint32_t AsVRegValue() const {
+    return reference_;
+  }
+
+ protected:
+  ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : reference_(Compress(mirror_ptr)) {
+  }
+
+  // Compress reference to its bit representation.
+  static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
+    return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
+  }
+
+  // Uncompress an encoded reference from its bit representation.
+  MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
+    return reinterpret_cast<MirrorType*>(as_bits);
+  }
+
+  friend class Object;
+
+  // The encoded reference to a mirror::Object.
+  uint32_t reference_;
+};
+
+// References between objects within the managed heap.
+template<class MirrorType>
+class MANAGED HeapReference : public ObjectReference<false, MirrorType> {
+ public:
+  static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return HeapReference<MirrorType>(mirror_ptr);
+  }
+ private:
+  HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ObjectReference<false, MirrorType>(mirror_ptr) {}
+};
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 3637181..2af32da 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -77,7 +77,7 @@
   EXPECT_EQ(CLASS_COMPONENT_TYPE_OFFSET, Class::ComponentTypeOffset().Int32Value());
 
   EXPECT_EQ(ARRAY_LENGTH_OFFSET, Array::LengthOffset().Int32Value());
-  EXPECT_EQ(OBJECT_ARRAY_DATA_OFFSET, Array::DataOffset(sizeof(Object*)).Int32Value());
+  EXPECT_EQ(OBJECT_ARRAY_DATA_OFFSET, Array::DataOffset(sizeof(HeapReference<Object>)).Int32Value());
 
   EXPECT_EQ(STRING_VALUE_OFFSET, String::ValueOffset().Int32Value());
   EXPECT_EQ(STRING_COUNT_OFFSET, String::CountOffset().Int32Value());
@@ -85,7 +85,8 @@
   EXPECT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value());
 
   EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
-  EXPECT_EQ(METHOD_CODE_OFFSET, ArtMethod::EntryPointFromCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET, ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET, ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
 }
 
 TEST_F(ObjectTest, IsInSamePackage) {
@@ -295,7 +296,7 @@
   uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
 
   ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
-                                                              sizeof(Object*));
+                                                              sizeof(HeapReference<Object>));
   Object* s0 = field->GetObj(klass);
   EXPECT_TRUE(s0 != NULL);
 
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index 18a84dc..ff019c6 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -29,24 +29,28 @@
 // has the static fields used to implement reflection on proxy objects.
 class MANAGED SynthesizedProxyClass : public Class {
  public:
-  ObjectArray<Class>* GetInterfaces() {
-    return interfaces_;
+  ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<ObjectArray<Class> >(OFFSET_OF_OBJECT_MEMBER(SynthesizedProxyClass,
+                                                                       interfaces_),
+                                               false);
   }
 
-  ObjectArray<ObjectArray<Class> >* GetThrows() {
-    return throws_;
+  ObjectArray<ObjectArray<Class> >* GetThrows()  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<ObjectArray<ObjectArray<Class> > >(OFFSET_OF_OBJECT_MEMBER(SynthesizedProxyClass,
+                                                                                     throws_),
+                                               false);
   }
 
  private:
-  ObjectArray<Class>* interfaces_;
-  ObjectArray<ObjectArray<Class> >* throws_;
+  HeapReference<ObjectArray<Class> > interfaces_;
+  HeapReference<ObjectArray<ObjectArray<Class> > > throws_;
   DISALLOW_IMPLICIT_CONSTRUCTORS(SynthesizedProxyClass);
 };
 
 // C++ mirror of java.lang.reflect.Proxy.
 class MANAGED Proxy : public Object {
  private:
-  Object* h_;
+  HeapReference<Object> h_;
 
   friend struct art::ProxyOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index d1be4dc..73d2673 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -29,24 +29,23 @@
 // C++ mirror of java.lang.StackTraceElement
 class MANAGED StackTraceElement : public Object {
  public:
-  const String* GetDeclaringClass() const {
-    return GetFieldObject<const String*>(
-        OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_), false);
+  String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
+                                  false);
   }
 
-  const String* GetMethodName() const {
-    return GetFieldObject<const String*>(
-        OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_), false);
+  String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
+                                  false);
   }
 
-  const String* GetFileName() const {
-    return GetFieldObject<const String*>(
-        OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_), false);
+  String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
+                                  false);
   }
 
-  int32_t GetLineNumber() const {
-    return GetField32(
-        OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), false);
+  int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), false);
   }
 
   static StackTraceElement* Alloc(Thread* self,
@@ -63,9 +62,9 @@
 
  private:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
-  String* declaring_class_;
-  String* file_name_;
-  String* method_name_;
+  HeapReference<String> declaring_class_;
+  HeapReference<String> file_name_;
+  HeapReference<String> method_name_;
   int32_t line_number_;
 
   static Class* GetStackTraceElement() {
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 1f756a1..10ae066 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -29,23 +29,19 @@
 namespace art {
 namespace mirror {
 
-const CharArray* String::GetCharArray() const {
-  return GetFieldObject<const CharArray*>(ValueOffset(), false);
-}
-
 CharArray* String::GetCharArray() {
-  return GetFieldObject<CharArray*>(ValueOffset(), false);
+  return GetFieldObject<CharArray>(ValueOffset(), false);
 }
 
 void String::ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()));
 }
 
-int32_t String::GetUtfLength() const {
+int32_t String::GetUtfLength() {
   return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength());
 }
 
-int32_t String::FastIndexOf(int32_t ch, int32_t start) const {
+int32_t String::FastIndexOf(int32_t ch, int32_t start) {
   int32_t count = GetLength();
   if (start < 0) {
     start = 0;
@@ -97,13 +93,13 @@
   return result;
 }
 
-int32_t String::GetLength() const {
+int32_t String::GetLength() {
   int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), false);
   DCHECK(result >= 0 && result <= GetCharArray()->GetLength());
   return result;
 }
 
-uint16_t String::CharAt(int32_t index) const {
+uint16_t String::CharAt(int32_t index) {
   // TODO: do we need this? Equals is the only caller, and could
   // bounds check itself.
   DCHECK_GE(count_, 0);  // ensures the unsigned comparison is safe.
@@ -179,7 +175,7 @@
   return string;
 }
 
-bool String::Equals(const String* that) const {
+bool String::Equals(String* that) {
   if (this == that) {
     // Quick reference equality test
     return true;
@@ -201,7 +197,7 @@
   }
 }
 
-bool String::Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) const {
+bool String::Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) {
   if (this->GetLength() != that_length) {
     return false;
   } else {
@@ -214,7 +210,7 @@
   }
 }
 
-bool String::Equals(const char* modified_utf8) const {
+bool String::Equals(const char* modified_utf8) {
   for (int32_t i = 0; i < GetLength(); ++i) {
     uint16_t ch = GetUtf16FromUtf8(&modified_utf8);
     if (ch == '\0' || ch != CharAt(i)) {
@@ -224,7 +220,7 @@
   return *modified_utf8 == '\0';
 }
 
-bool String::Equals(const StringPiece& modified_utf8) const {
+bool String::Equals(const StringPiece& modified_utf8) {
   const char* p = modified_utf8.data();
   for (int32_t i = 0; i < GetLength(); ++i) {
     uint16_t ch = GetUtf16FromUtf8(&p);
@@ -236,7 +232,7 @@
 }
 
 // Create a modified UTF-8 encoded std::string from a java/lang/String object.
-std::string String::ToModifiedUtf8() const {
+std::string String::ToModifiedUtf8() {
   const uint16_t* chars = GetCharArray()->GetData() + GetOffset();
   size_t byte_count = GetUtfLength();
   std::string result(byte_count, static_cast<char>(0));
@@ -259,9 +255,9 @@
 }
 #endif
 
-int32_t String::CompareTo(String* rhs) const {
+int32_t String::CompareTo(String* rhs) {
   // Quick test for comparison of a string with itself.
-  const String* lhs = this;
+  String* lhs = this;
   if (lhs == rhs) {
     return 0;
   }
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 4bbcb9c..a82b26c 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -44,24 +44,23 @@
     return OFFSET_OF_OBJECT_MEMBER(String, offset_);
   }
 
-  const CharArray* GetCharArray() const;
-  CharArray* GetCharArray();
+  CharArray* GetCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t GetOffset() const {
+  int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     int32_t result = GetField32(OffsetOffset(), false);
     DCHECK_LE(0, result);
     return result;
   }
 
-  int32_t GetLength() const;
+  int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t GetUtfLength() const;
+  int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint16_t CharAt(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -78,29 +77,28 @@
                                        const char* utf8_data_in)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool Equals(const char* modified_utf8) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // TODO: do we need this overload? give it a more intention-revealing name.
-  bool Equals(const StringPiece& modified_utf8) const
+  bool Equals(const StringPiece& modified_utf8)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool Equals(const String* that) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Compare UTF-16 code point values not in a locale-sensitive manner
   int Compare(int32_t utf16_length, const char* utf8_data_in);
 
   // TODO: do we need this overload? give it a more intention-revealing name.
   bool Equals(const uint16_t* that_chars, int32_t that_offset,
-              int32_t that_length) const
+              int32_t that_length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Create a modified UTF-8 encoded std::string from a java/lang/String object.
-  std::string ToModifiedUtf8() const;
+  std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t FastIndexOf(int32_t ch, int32_t start) const;
+  int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t CompareTo(String* other) const;
+  int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Class* GetJavaLangString() {
     DCHECK(java_lang_String_ != NULL);
@@ -123,7 +121,7 @@
     SetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count, false);
   }
 
-  void SetOffset(int32_t new_offset) {
+  void SetOffset(int32_t new_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LE(0, new_offset);
     DCHECK_GE(GetLength(), new_offset);
     SetField32(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset, false);
@@ -138,7 +136,7 @@
   void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
-  CharArray* array_;
+  HeapReference<CharArray> array_;
 
   int32_t count_;
 
@@ -155,8 +153,8 @@
 
 class MANAGED StringClass : public Class {
  private:
-  CharArray* ASCII_;
-  Object* CASE_INSENSITIVE_ORDER_;
+  HeapReference<CharArray> ASCII_;
+  HeapReference<Object> CASE_INSENSITIVE_ORDER_;
   uint32_t REPLACEMENT_CHAR_;
   int64_t serialVersionUID_;
   friend struct art::StringClassOffsets;  // for verifying offset information
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b55db72..2318b74 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -33,22 +33,22 @@
 Class* Throwable::java_lang_Throwable_ = NULL;
 
 void Throwable::SetCause(Throwable* cause) {
-  CHECK(cause != NULL);
+  CHECK(cause != nullptr);
   CHECK(cause != this);
-  Throwable* current_cause = GetFieldObject<Throwable*>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_),
-                                                        false);
+  Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_),
+                                                       false);
   CHECK(current_cause == NULL || current_cause == this);
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false);
 }
 
-bool Throwable::IsCheckedException() const {
+bool Throwable::IsCheckedException() {
   if (InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_Error))) {
     return false;
   }
   return !InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_RuntimeException));
 }
 
-std::string Throwable::Dump() const {
+std::string Throwable::Dump() {
   std::string result(PrettyTypeOf(this));
   result += ": ";
   String* msg = GetDetailMessage();
@@ -74,7 +74,7 @@
                              source_file, line_number);
     }
   }
-  Throwable* cause = GetFieldObject<Throwable*>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false);
+  Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false);
   if (cause != NULL && cause != this) {  // Constructor makes cause == this by default.
     result += "Caused by: ";
     result += cause->Dump();
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 5a90599..bc9848a 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -33,16 +33,16 @@
   void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message, false);
   }
-  String* GetDetailMessage() const {
-    return GetFieldObject<String*>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false);
+  String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false);
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // This is a runtime version of initCause, you shouldn't use it if initCause may have been
   // overridden. Also it asserts rather than throwing exceptions. Currently this is only used
   // in cases like the verifier where the checks cannot fail and initCause isn't overridden.
   void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsCheckedException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Class* GetJavaLangThrowable() {
     DCHECK(java_lang_Throwable_ != NULL);
@@ -55,16 +55,16 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
-  Object* GetStackState() const {
-    return GetFieldObject<Object*>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), true);
+  Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), true);
   }
 
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
-  Throwable* cause_;
-  String* detail_message_;
-  Object* stack_state_;  // Note this is Java volatile:
-  Object* stack_trace_;
-  Object* suppressed_exceptions_;
+  HeapReference<Throwable> cause_;
+  HeapReference<String> detail_message_;
+  HeapReference<Object> stack_state_;  // Note this is Java volatile:
+  HeapReference<Object> stack_trace_;
+  HeapReference<Object> suppressed_exceptions_;
 
   static Class* java_lang_Throwable_;