Object model changes to support 64bit.

Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object* so that functionality to compress larger
references can be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.

Expand fields in objects holding pointers so they can hold 64bit pointers. It's
expected that the size of these will come down by improving where we hold
compiler meta-data.
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type.
Add missing annotalysis information on the mutator lock, and fix issues
relating to the existing annotations.
Refactor and share implementations for array copy between System and uses
elsewhere in the runtime.
Fix numerous 64bit build issues.

Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index cf2c9d0..6cb59b4 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -23,76 +23,111 @@
 
 class MemMapTest : public testing::Test {
  public:
-  byte* BaseBegin(MemMap* mem_map) {
+  static byte* BaseBegin(MemMap* mem_map) {
     return reinterpret_cast<byte*>(mem_map->base_begin_);
   }
-  size_t BaseSize(MemMap* mem_map) {
+  static size_t BaseSize(MemMap* mem_map) {
     return mem_map->base_size_;
   }
+
+  static void RemapAtEndTest(bool low_4gb) {
+    std::string error_msg;
+    // Cast the page size to size_t.
+    const size_t page_size = static_cast<size_t>(kPageSize);
+    // Map a two-page memory region.
+    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                      nullptr,
+                                      2 * page_size,
+                                      PROT_READ | PROT_WRITE,
+                                      low_4gb,
+                                      &error_msg);
+    // Check its state and write to it.
+    byte* base0 = m0->Begin();
+    ASSERT_TRUE(base0 != nullptr) << error_msg;
+    size_t size0 = m0->Size();
+    EXPECT_EQ(m0->Size(), 2 * page_size);
+    EXPECT_EQ(BaseBegin(m0), base0);
+    EXPECT_EQ(BaseSize(m0), size0);
+    memset(base0, 42, 2 * page_size);
+    // Remap the latter half into a second MemMap.
+    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
+                                "MemMapTest_RemapAtEndTest_map1",
+                                PROT_READ | PROT_WRITE,
+                                &error_msg);
+    // Check the states of the two maps.
+    EXPECT_EQ(m0->Begin(), base0) << error_msg;
+    EXPECT_EQ(m0->Size(), page_size);
+    EXPECT_EQ(BaseBegin(m0), base0);
+    EXPECT_EQ(BaseSize(m0), page_size);
+    byte* base1 = m1->Begin();
+    size_t size1 = m1->Size();
+    EXPECT_EQ(base1, base0 + page_size);
+    EXPECT_EQ(size1, page_size);
+    EXPECT_EQ(BaseBegin(m1), base1);
+    EXPECT_EQ(BaseSize(m1), size1);
+    // Write to the second region.
+    memset(base1, 43, page_size);
+    // Check the contents of the two regions.
+    for (size_t i = 0; i < page_size; ++i) {
+      EXPECT_EQ(base0[i], 42);
+    }
+    for (size_t i = 0; i < page_size; ++i) {
+      EXPECT_EQ(base1[i], 43);
+    }
+    // Unmap the first region.
+    delete m0;
+    // Make sure the second region is still accessible after the first
+    // region is unmapped.
+    for (size_t i = 0; i < page_size; ++i) {
+      EXPECT_EQ(base1[i], 43);
+    }
+    delete m1;
+  }
 };
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   std::string error_msg;
   UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                             NULL,
+                                             nullptr,
                                              0,
                                              PROT_READ,
+                                             false,
                                              &error_msg));
-  ASSERT_TRUE(map.get() != NULL) << error_msg;
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
+                                 nullptr,
+                                 kPageSize,
+                                 PROT_READ | PROT_WRITE,
+                                 false,
+                                 &error_msg));
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
 }
 
-TEST_F(MemMapTest, RemapAtEnd) {
+#ifdef __LP64__
+TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
   std::string error_msg;
-  // Cast the page size to size_t.
-  const size_t page_size = static_cast<size_t>(kPageSize);
-  // Map a two-page memory region.
-  MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
-                                    NULL,
-                                    2 * page_size,
-                                    PROT_READ | PROT_WRITE,
-                                    &error_msg);
-  // Check its state and write to it.
-  byte* base0 = m0->Begin();
-  ASSERT_TRUE(base0 != NULL) << error_msg;
-  size_t size0 = m0->Size();
-  EXPECT_EQ(m0->Size(), 2 * page_size);
-  EXPECT_EQ(BaseBegin(m0), base0);
-  EXPECT_EQ(BaseSize(m0), size0);
-  memset(base0, 42, 2 * page_size);
-  // Remap the latter half into a second MemMap.
-  MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
-                              "MemMapTest_RemapAtEndTest_map1",
-                              PROT_READ | PROT_WRITE,
-                              &error_msg);
-  // Check the states of the two maps.
-  EXPECT_EQ(m0->Begin(), base0) << error_msg;
-  EXPECT_EQ(m0->Size(), page_size);
-  EXPECT_EQ(BaseBegin(m0), base0);
-  EXPECT_EQ(BaseSize(m0), page_size);
-  byte* base1 = m1->Begin();
-  size_t size1 = m1->Size();
-  EXPECT_EQ(base1, base0 + page_size);
-  EXPECT_EQ(size1, page_size);
-  EXPECT_EQ(BaseBegin(m1), base1);
-  EXPECT_EQ(BaseSize(m1), size1);
-  // Write to the second region.
-  memset(base1, 43, page_size);
-  // Check the contents of the two regions.
-  for (size_t i = 0; i < page_size; ++i) {
-    EXPECT_EQ(base0[i], 42);
-  }
-  for (size_t i = 0; i < page_size; ++i) {
-    EXPECT_EQ(base1[i], 43);
-  }
-  // Unmap the first region.
-  delete m0;
-  // Make sure the second region is still accessible after the first
-  // region is unmapped.
-  for (size_t i = 0; i < page_size; ++i) {
-    EXPECT_EQ(base1[i], 43);
-  }
-  delete m1;
+  UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
+                                             nullptr,
+                                             kPageSize,
+                                             PROT_READ | PROT_WRITE,
+                                             true,
+                                             &error_msg));
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
 }
+#endif
+
+TEST_F(MemMapTest, RemapAtEnd) {
+  RemapAtEndTest(false);
+}
+
+#ifdef __LP64__
+TEST_F(MemMapTest, RemapAtEnd32bit) {
+  RemapAtEndTest(true);
+}
+#endif
 
 }  // namespace art