Object model changes to support 64bit.

Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object*, allowing the functionality to compress
larger references to be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.
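
To make the intent concrete, here is a minimal sketch of what such a value
type can look like. Only the AsMirrorPtr/Assign/Clear surface and the two
template parameters are taken from the uses in the diff below; the
Compress/UnCompress helpers and the poisoning behaviour are assumptions of
the sketch, not necessarily how the real implementation works:

#include <stdint.h>

// A 32bit value type holding a compressed reference to a MirrorType object.
// When kPoisonReferences is true the stored bits are negated so that a stray
// direct dereference of the raw field faults instead of reading the object.
template<bool kPoisonReferences, class MirrorType>
class ObjectReference {
 public:
  MirrorType* AsMirrorPtr() const {
    return UnCompress();
  }

  void Assign(MirrorType* other) {
    reference_ = Compress(other);
  }

  void Clear() {
    reference_ = 0;
  }

 protected:
  explicit ObjectReference(MirrorType* mirror_ptr)
      : reference_(Compress(mirror_ptr)) {}

 private:
  // Fold a native (possibly 64bit) pointer down to the 32bit form stored in
  // objects; the heap is assumed to sit within the low 4GB of address space.
  static uint32_t Compress(MirrorType* mirror_ptr) {
    uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
    return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
  }

  MirrorType* UnCompress() const {
    uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
    return reinterpret_cast<MirrorType*>(as_bits);
  }

  // The compressed reference: 32bit regardless of native pointer width.
  uint32_t reference_;
};

The StackReference type in stack.h below derives from
ObjectReference<false, MirrorType>, which pins shadow-frame reference slots at
32bit even on 64bit targets.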

Expand fields in objects holding pointers so they can hold 64bit pointers.
It's expected that the size of these will come down by improving where we hold
compiler meta-data.
Stub out the x86_64 architecture-specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type; a
sketch of the templated form appears below.
Add missing annotalysis information on the mutator lock and fix issues
relating to the existing annotations.
Refactor array copy so that one implementation is shared between System and
uses elsewhere in the runtime.
Fix numerous 64bit build issues.
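
For the AtomicInteger change, a minimal sketch of the templated form, assuming
GCC __sync builtins; the member names (Load, Store, FetchAndAdd,
CompareAndSwap) are illustrative assumptions, not the actual API:

#include <stdint.h>

// Minimal sketch: the int32_t-only AtomicInteger wrapper becomes Atomic<T>,
// so the runtime can also instantiate Atomic<uint64_t>, Atomic<size_t>, etc.
template<typename T>
class Atomic {
 public:
  Atomic() : value_(0) {}
  explicit Atomic(T value) : value_(value) {}

  T Load() const {
    return value_;
  }

  void Store(T desired) {
    value_ = desired;
  }

  // Atomically add 'value'; returns the old value.
  T FetchAndAdd(T value) {
    return __sync_fetch_and_add(&value_, value);
  }

  // Atomically replace the value with 'desired' if it equals 'expected'.
  bool CompareAndSwap(T expected, T desired) {
    return __sync_bool_compare_and_swap(&value_, expected, desired);
  }

 private:
  volatile T value_;
};

// The old name can remain as an alias while callers migrate.
typedef Atomic<int32_t> AtomicInteger;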

Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/stack.h b/runtime/stack.h
index 0692390..8466069 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -19,8 +19,10 @@
 
 #include "dex_file.h"
 #include "instrumentation.h"
+#include "base/casts.h"
 #include "base/macros.h"
 #include "arch/context.h"
+#include "mirror/object_reference.h"
 
 #include <stdint.h>
 #include <string>
@@ -96,16 +98,33 @@
   kVRegNonSpecialTempBaseReg = -3,
 };
 
+// A reference from the shadow stack to a MirrorType object within the Java heap.
+template<class MirrorType>
+class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType> {
+ public:
+  StackReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : mirror::ObjectReference<false, MirrorType>(nullptr) {}
+
+  static StackReference<MirrorType> FromMirrorPtr(MirrorType* p)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return StackReference<MirrorType>(p);
+  }
+
+ private:
+  StackReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : mirror::ObjectReference<false, MirrorType>(p) {}
+};
+
 // ShadowFrame has 3 possible layouts:
 //  - portable - a unified array of VRegs and references. Precise references need GC maps.
 //  - interpreter - separate VRegs and reference arrays. References are in the reference array.
 //  - JNI - just VRegs, but where every VReg holds a reference.
 class ShadowFrame {
  public:
-  // Compute size of ShadowFrame in bytes.
+  // Compute size of ShadowFrame in bytes assuming it has a reference array.
   static size_t ComputeSize(uint32_t num_vregs) {
     return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
-           (sizeof(mirror::Object*) * num_vregs);
+           (sizeof(StackReference<mirror::Object>) * num_vregs);
   }
 
   // Create ShadowFrame in heap for deoptimization.
@@ -195,22 +214,19 @@
   }
 
   template <bool kChecked = false>
-  mirror::Object* GetVRegReference(size_t i) const {
+  mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     if (HasReferenceArray()) {
-      mirror::Object* ref = References()[i];
+      mirror::Object* ref = References()[i].AsMirrorPtr();
       if (kChecked) {
         CHECK(VerifyReference(ref)) << "VReg " << i << "(" << ref
                                     << ") is in protected space, reference array " << true;
       }
-      // If the vreg reference is not equal to the vreg then the vreg reference is stale.
-      if (UNLIKELY(reinterpret_cast<uint32_t>(ref) != vregs_[i])) {
-        return nullptr;
-      }
       return ref;
     } else {
-      const uint32_t* vreg = &vregs_[i];
-      mirror::Object* ref = *reinterpret_cast<mirror::Object* const*>(vreg);
+      const uint32_t* vreg_ptr = &vregs_[i];
+      mirror::Object* ref =
+          reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
       if (kChecked) {
         CHECK(VerifyReference(ref)) << "VReg " << i
             << "(" << ref << ") is in protected space, reference array " << false;
@@ -231,7 +247,7 @@
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
     if (kMovingCollector && HasReferenceArray()) {
-      References()[i] = nullptr;
+      References()[i].Clear();
     }
   }
 
@@ -242,7 +258,7 @@
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
     if (kMovingCollector && HasReferenceArray()) {
-      References()[i] = nullptr;
+      References()[i].Clear();
     }
   }
 
@@ -255,8 +271,8 @@
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
     if (kMovingCollector && HasReferenceArray()) {
-      References()[i] = nullptr;
-      References()[i + 1] = nullptr;
+      References()[i].Clear();
+      References()[i + 1].Clear();
     }
   }
 
@@ -269,24 +285,24 @@
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
     if (kMovingCollector && HasReferenceArray()) {
-      References()[i] = nullptr;
-      References()[i + 1] = nullptr;
+      References()[i].Clear();
+      References()[i + 1].Clear();
     }
   }
 
-  void SetVRegReference(size_t i, mirror::Object* val) {
+  void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     DCHECK(!kMovingCollector || VerifyReference(val))
         << "VReg " << i << "(" << val << ") is in protected space";
     uint32_t* vreg = &vregs_[i];
-    *reinterpret_cast<mirror::Object**>(vreg) = val;
+    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
     if (HasReferenceArray()) {
-      References()[i] = val;
+      References()[i].Assign(val);
     }
   }
 
-  mirror::ArtMethod* GetMethod() const {
-    DCHECK_NE(method_, static_cast<void*>(NULL));
+  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(method_ != nullptr);
     return method_;
   }
 
@@ -298,7 +314,7 @@
 
   void SetMethod(mirror::ArtMethod* method) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    DCHECK_NE(method, static_cast<void*>(NULL));
+    DCHECK(method != nullptr);
     method_ = method;
 #else
     UNUSED(method);
@@ -306,7 +322,7 @@
 #endif
   }
 
-  bool Contains(mirror::Object** shadow_frame_entry_obj) const {
+  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
     if (HasReferenceArray()) {
       return ((&References()[0] <= shadow_frame_entry_obj) &&
               (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
@@ -346,22 +362,22 @@
       CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
       number_of_vregs_ |= kHasReferenceArray;
 #endif
-      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(mirror::Object*)));
+      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
     } else {
       memset(vregs_, 0, num_vregs * sizeof(uint32_t));
     }
   }
 
-  mirror::Object* const* References() const {
+  const StackReference<mirror::Object>* References() const {
     DCHECK(HasReferenceArray());
     const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
-    return reinterpret_cast<mirror::Object* const*>(vreg_end);
+    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
   }
 
   bool VerifyReference(const mirror::Object* val) const;
 
-  mirror::Object** References() {
-    return const_cast<mirror::Object**>(const_cast<const ShadowFrame*>(this)->References());
+  StackReference<mirror::Object>* References() {
+    return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
   }
 
 #if defined(ART_USE_PORTABLE_COMPILER)
@@ -470,9 +486,9 @@
     return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
   }
 
-  size_t NumJniShadowFrameReferences() const;
+  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool ShadowFramesContain(mirror::Object** shadow_frame_entry) const;
+  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
 
  private:
   ManagedStack* link_;
@@ -494,18 +510,18 @@
   void WalkStack(bool include_transitions = false)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  mirror::ArtMethod* GetMethod() const {
-    if (cur_shadow_frame_ != NULL) {
+  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (cur_shadow_frame_ != nullptr) {
       return cur_shadow_frame_->GetMethod();
-    } else if (cur_quick_frame_ != NULL) {
+    } else if (cur_quick_frame_ != nullptr) {
       return *cur_quick_frame_;
     } else {
-      return NULL;
+      return nullptr;
     }
   }
 
   bool IsShadowFrame() const {
-    return cur_shadow_frame_ != NULL;
+    return cur_shadow_frame_ != nullptr;
   }
 
   uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -514,9 +530,10 @@
 
   size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const {
+  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Callee saves are held at the top of the frame
-    DCHECK(GetMethod() != NULL);
+    DCHECK(GetMethod() != nullptr);
     byte* save_addr =
         reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
 #if defined(__i386__)
@@ -553,17 +570,17 @@
 
   // This is a fast-path for getting/setting values in a quick frame.
   uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
-                   uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
-                   uint16_t vreg) const {
+                        uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
+                        uint16_t vreg) const {
     int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
     DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
     byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
     return reinterpret_cast<uint32_t*>(vreg_addr);
   }
 
-  uintptr_t GetReturnPc() const;
+  uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetReturnPc(uintptr_t new_ret_pc);
+  void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Return sp-relative offset for a Dalvik virtual register, compiler