Merge "Shared single GC iteration accounting for all GCs."
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index b3fac77..638c590 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -363,20 +363,27 @@
 INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
             RegLocation arg1, bool safepoint_pc)
 
+// TODO: This is a hack! Reshape the two macros into functions and move them to a better place.
+#define IsSameReg(r1, r2) \
+  (GetRegInfo(r1)->Master()->GetReg().GetReg() == GetRegInfo(r2)->Master()->GetReg().GetReg())
+#define TargetArgReg(arg, is_wide) \
+  (GetRegInfo(TargetReg(arg))->FindMatchingView( \
+     (is_wide) ? RegisterInfo::k64SoloStorageMask : RegisterInfo::k32SoloStorageMask)->GetReg())
+
 void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
-  if (arg1.GetReg() == TargetReg(kArg0).GetReg()) {
-    if (arg0.GetReg() == TargetReg(kArg1).GetReg()) {
+  if (IsSameReg(arg1, TargetReg(kArg0))) {
+    if (IsSameReg(arg0, TargetReg(kArg1))) {
       // Swap kArg0 and kArg1 with kArg2 as temp.
-      OpRegCopy(TargetReg(kArg2), arg1);
-      OpRegCopy(TargetReg(kArg0), arg0);
-      OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
+      OpRegCopy(TargetArgReg(kArg2, arg1.Is64Bit()), arg1);
+      OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+      OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2));
     } else {
-      OpRegCopy(TargetReg(kArg1), arg1);
-      OpRegCopy(TargetReg(kArg0), arg0);
+      OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
+      OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
     }
   } else {
-    OpRegCopy(TargetReg(kArg0), arg0);
-    OpRegCopy(TargetReg(kArg1), arg1);
+    OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+    OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
   }
 }
 
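The two macros above compare register views by their master register, so that (for example) the 32-bit and 64-bit views of the same physical register compare equal, and TargetArgReg then picks the view whose width matches the argument. A standalone toy model of that comparison, with hypothetical stand-in types rather than the real Mir2Lir/RegisterInfo API:

    #include <cassert>

    // Two register views (think w0 and x0 on arm64) are "the same
    // register" iff they share a master physical register. Plain
    // structs stand in for RegStorage/RegisterInfo.
    struct RegView {
      int master;  // physical register number
      bool is64;   // which width view of it
    };

    bool IsSameReg(const RegView& r1, const RegView& r2) {
      return r1.master == r2.master;  // compare masters, not raw encodings
    }

    int main() {
      RegView w0{0, false}, x0{0, true}, w1{1, false};
      assert(IsSameReg(w0, x0));   // same physical reg, different widths
      assert(!IsSameReg(w0, w1));  // different physical regs
      return 0;
    }

This is why the old raw GetReg() comparison broke on 64-bit targets: arg1 in its 64-bit view and kArg0 in its 32-bit view can alias the same physical register without their raw register numbers matching.
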
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 6469d9c..2dd7def 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -213,8 +213,13 @@
   ResetDefLoc(rl_dest);
   if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
     def_start = last_lir_insn_;
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+    if (rl_dest.ref) {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kHeapRef);
+      StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+    } else {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+      Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+    }
     MarkClean(rl_dest);
     def_end = last_lir_insn_;
     if (!rl_dest.ref) {
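
The store now branches on rl_dest.ref so that reference stores go through StoreRefDisp under the kHeapRef memory-reference type, while plain 32-bit values keep using Store32Disp under kDalvikReg. A minimal sketch of the ScopedMemRefType pattern itself, an RAII guard that sets the current memory-reference type and restores it on scope exit (toy types, not the real Quick backend classes):

    #include <cassert>

    enum MemRefType { kDalvikReg, kHeapRef };

    struct Codegen { MemRefType mem_ref_type = kDalvikReg; };

    class ScopedMemRefType {
     public:
      ScopedMemRefType(Codegen* cg, MemRefType t)
          : cg_(cg), old_(cg->mem_ref_type) {
        cg_->mem_ref_type = t;  // tag loads/stores emitted in this scope
      }
      ~ScopedMemRefType() { cg_->mem_ref_type = old_; }
     private:
      Codegen* cg_;
      MemRefType old_;
    };

    int main() {
      Codegen cg;
      {
        ScopedMemRefType guard(&cg, kHeapRef);  // reference-store path
        assert(cg.mem_ref_type == kHeapRef);
      }
      assert(cg.mem_ref_type == kDalvikReg);    // restored on exit
      return 0;
    }
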
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b6b5313..a793513 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1136,9 +1136,8 @@
   }
 
   if (compiler_filter_string == nullptr) {
-    if (instruction_set == kArm64 ||
-        instruction_set == kMips) {
-      // TODO: implement/fix compilers for these architectures.
+    if (instruction_set == kMips) {
+      // TODO: fix compiler for Mips.
       compiler_filter_string = "interpret-only";
     } else if (image) {
       compiler_filter_string = "speed";
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 6d5b59c..f29a7ec 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -122,7 +122,9 @@
   char* ptr = static_cast<char*>(buffer);
   while (byte_count > 0) {
     ssize_t bytes_read = TEMP_FAILURE_RETRY(read(fd_, ptr, byte_count));
-    if (bytes_read == -1) {
+    if (bytes_read <= 0) {
+      // 0: end of file
+      // -1: error
       return false;
     }
     byte_count -= bytes_read;  // Reduce the number of remaining bytes.
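
This guard also fixes a hang: read() returns 0 at end of file, and the old `== -1` check let a 0 return fall through with byte_count and ptr unchanged, so ReadFully on a too-short file would loop forever. A standalone version of the fixed loop (EINTR retry omitted; ART wraps the call in TEMP_FAILURE_RETRY):

    #include <unistd.h>
    #include <cstddef>

    bool ReadFully(int fd, void* buffer, size_t byte_count) {
      char* ptr = static_cast<char*>(buffer);
      while (byte_count > 0) {
        ssize_t bytes_read = read(fd, ptr, byte_count);
        if (bytes_read <= 0) {
          return false;  // 0: end of file, -1: error
        }
        byte_count -= bytes_read;  // fewer bytes remaining
        ptr += bytes_read;         // advance past what was read
      }
      return true;
    }

The new FdFileTest.ReadFullyEmptyFile test below exercises exactly this case: four bytes requested from a zero-length file must fail rather than spin.
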
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index d620666..33b3d3e 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -16,6 +16,7 @@
 
 #include "base/unix_file/fd_file.h"
 #include "base/unix_file/random_access_file_test.h"
+#include "common_runtime_test.h"  // For ScratchFile
 #include "gtest/gtest.h"
 
 namespace unix_file {
@@ -60,4 +61,15 @@
   EXPECT_TRUE(file.IsOpened());
 }
 
+TEST_F(FdFileTest, ReadFullyEmptyFile) {
+  // New scratch file, zero-length.
+  art::ScratchFile tmp;
+  FdFile file;
+  ASSERT_TRUE(file.Open(tmp.GetFilename(), O_RDONLY));
+  EXPECT_GE(file.Fd(), 0);
+  EXPECT_TRUE(file.IsOpened());
+  uint8_t buffer[16];
+  EXPECT_FALSE(file.ReadFully(&buffer, 4));
+}
+
 }  // namespace unix_file
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index b787233..f561643 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -59,8 +59,7 @@
 
 // Make sure that the entry at "idx" is correctly paired with "iref".
 inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
-  const mirror::Object* obj = table_[idx];
-  IndirectRef checkRef = ToIndirectRef(obj, idx);
+  IndirectRef checkRef = ToIndirectRef(idx);
   if (UNLIKELY(checkRef != iref)) {
     LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what
                << " stale " << kind_ << " " << iref
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 98e1d21..ad798ed 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -137,13 +137,13 @@
       DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
     }
     UpdateSlotAdd(obj, pScan - table_);
-    result = ToIndirectRef(obj, pScan - table_);
+    result = ToIndirectRef(pScan - table_);
     *pScan = obj;
     segment_state_.parts.numHoles--;
   } else {
     // Add to the end.
     UpdateSlotAdd(obj, topIndex);
-    result = ToIndirectRef(obj, topIndex);
+    result = ToIndirectRef(topIndex);
     table_[topIndex++] = obj;
     segment_state_.parts.topIndex = topIndex;
   }
@@ -277,9 +277,6 @@
       // while the read barrier won't.
       entries.push_back(obj);
     } else {
-      // We need a read barrier if weak globals. Since this is for
-      // debugging where performance isn't top priority, we
-      // unconditionally enable the read barrier, which is conservative.
       obj = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
       entries.push_back(obj);
     }
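
ToIndirectRef never used its object parameter; the iref is computed purely from the slot's serial number, the table index, and the table kind, so the parameter is dropped here and at both call sites. For context, a toy model of the add path itself: reuse a hole left by a removed entry if one exists, otherwise append at the top (hypothetical simplified types; serial numbers and segment state omitted):

    #include <cstddef>
    #include <vector>

    struct ToyIrt {
      std::vector<void*> slots;
      size_t top = 0;
      size_t holes = 0;

      size_t Add(void* obj) {
        if (holes > 0) {
          for (size_t i = top; i-- > 0;) {  // scan down for a hole
            if (slots[i] == nullptr) {
              slots[i] = obj;
              --holes;
              return i;
            }
          }
        }
        slots.push_back(obj);  // no holes: add to the end
        return top++;
      }
    };
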
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5b3ed68..b3a855d 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
 #include "mem_map.h"
 #include "object_callbacks.h"
 #include "offsets.h"
+#include "read_barrier.h"
 
 namespace art {
 namespace mirror {
@@ -215,6 +216,7 @@
   }
 
   mirror::Object** operator*() {
+    // This does not have a read barrier, as it is used to visit roots.
     return &table_[i_];
   }
 
@@ -298,6 +300,7 @@
     return segment_state_.parts.topIndex;
   }
 
+  // Note IrtIterator does not have a read barrier as it's used to visit roots.
   IrtIterator begin() {
     return IrtIterator(table_, 0, Capacity());
   }
@@ -333,7 +336,7 @@
    * The object pointer itself is subject to relocation in some GC
    * implementations, so we shouldn't really be using it here.
    */
-  IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
+  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
     DCHECK_LT(tableIndex, 65536U);
     uint32_t serialChunk = slot_data_[tableIndex].serial;
     uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
@@ -368,9 +371,8 @@
   std::unique_ptr<MemMap> table_mem_map_;
   // Mem map where we store the extended debugging info.
   std::unique_ptr<MemMap> slot_mem_map_;
-  // bottom of the stack. If a JNI weak global table, do not directly
-  // access the object references in this as they are weak roots. Use
-  // Get() that has a read barrier.
+  // bottom of the stack. Do not directly access the object references
+  // in this table, as they are roots. Use Get(), which has a read barrier.
   mirror::Object** table_;
   /* bit mask, ORed into all irefs */
   IndirectRefKind kind_;
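
A worked example of the encoding in ToIndirectRef: the serial number lands in the high bits, the table index in bits 2 and up, and the 2-bit kind in the low bits (the kind value below follows ART's 2-bit IndirectRefKind enum, e.g. 2 for a global reference):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t serial = 3, index = 5, kind = 2;  // hypothetical slot state
      uintptr_t uref = serial << 20 | (index << 2) | kind;
      assert(uref == 0x300016);                 // 0x300000 | 0x14 | 0x2
      assert((uref & 3) == kind);               // low 2 bits: the kind
      assert(((uref >> 2) & 0xffff) == index);  // next 16 bits: the index
      return 0;
    }

Since none of this depends on the object's address, an iref survives its referent being moved by the GC, which is exactly why the comment warns against using the object pointer here.
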
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5606d47..513b409 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -359,8 +359,9 @@
         jni_on_load_result_(kPending) {
   }
 
-  mirror::Object* GetClassLoader() {
-    return class_loader_;
+  mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object** root = &class_loader_;
+    return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
   }
 
   std::string GetPath() {
@@ -3160,9 +3161,7 @@
   while (UNLIKELY(!allow_new_weak_globals_)) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
-  // The weak globals do need a read barrier as they are weak roots.
-  mirror::Object* obj = weak_globals_.Get<kWithReadBarrier>(ref);
-  return obj;
+  return weak_globals_.Get(ref);
 }
 
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
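
Both changes in this file follow the same rule: anything that hands out a reference cached in native memory is handing out a GC root, and with a concurrent copying collector that root may point at a from-space object. The getter therefore routes through BarrierForRoot, which may heal the root in place and must return the up-to-date reference. A toy model of the pattern (ReadRootBarrier is a stand-in for ReadBarrier::BarrierForRoot):

    struct Object { Object* forward = nullptr; };

    Object* ReadRootBarrier(Object** root) {
      if (*root != nullptr && (*root)->forward != nullptr) {
        *root = (*root)->forward;  // heal the root to the to-space copy
      }
      return *root;
    }

    class Holder {
     public:
      explicit Holder(Object* o) : root_(o) {}
      Object* Get() { return ReadRootBarrier(&root_); }  // never raw root_
     private:
      Object* root_;  // a GC root: do not read directly
    };

Conversely, DecodeWeakGlobal no longer requests the barrier explicitly: the explicit kWithReadBarrier template argument is dropped because the barrier is now the default for Get(), just as the thread.cc hunk below drops kWithoutReadBarrier for strong globals.
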
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 451d13c..ed5db4e 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -33,11 +33,15 @@
 
 class ReadBarrier {
  public:
+  // Whether the given field gets updated is up to the implementation;
+  // the return value, however, must be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE static MirrorType* Barrier(
       mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Whether the given root gets updated is up to the implementation;
+  // the return value, however, must be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
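
The two entry points differ only in where the slot lives: Barrier covers a read of a reference field inside a heap object (given as object plus offset), while BarrierForRoot covers a read of a root held in native memory. A compact toy contrast (stand-in types; the real templates take MemberOffset and HeapReference):

    struct Obj { Obj* field = nullptr; Obj* forward = nullptr; };

    Obj* ToSpace(Obj* ref) {
      return (ref != nullptr && ref->forward != nullptr) ? ref->forward : ref;
    }

    // Field read: the slot lives in the heap object itself.
    Obj* FieldBarrier(Obj* holder) { return ToSpace(holder->field); }

    // Root read: the slot lives outside the heap and may be healed in place.
    Obj* RootBarrier(Obj** root) { return *root = ToSpace(*root); }

In both cases the sketch obeys the contract stated in the comments added above: updating the slot is optional, returning the updated reference is not.
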
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 11527fa..cd35863 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -24,6 +24,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/string-inl.h"
+#include "read_barrier.h"
 #include "thread.h"
 #include "utils.h"
 
@@ -51,7 +52,9 @@
 void ReferenceTable::Remove(mirror::Object* obj) {
   // We iterate backwards on the assumption that references are LIFO.
   for (int i = entries_.size() - 1; i >= 0; --i) {
-    if (entries_[i] == obj) {
+    mirror::Object* entry =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries_[i]);
+    if (entry == obj) {
       entries_.erase(entries_.begin() + i);
       return;
     }
@@ -140,12 +143,12 @@
   return entries_.size();
 }
 
-void ReferenceTable::Dump(std::ostream& os) const {
+void ReferenceTable::Dump(std::ostream& os) {
   os << name_ << " reference table dump:\n";
   Dump(os, entries_);
 }
 
-void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
+void ReferenceTable::Dump(std::ostream& os, Table& entries) {
   if (entries.empty()) {
     os << "  (empty)\n";
     return;
@@ -160,7 +163,8 @@
   }
   os << "  Last " << (count - first) << " entries (of " << count << "):\n";
   for (int idx = count - 1; idx >= first; --idx) {
-    mirror::Object* ref = entries[idx];
+    mirror::Object* ref =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[idx]);
     if (ref == NULL) {
       continue;
     }
@@ -194,7 +198,12 @@
   }
 
   // Make a copy of the table and sort it.
-  Table sorted_entries(entries.begin(), entries.end());
+  Table sorted_entries;
+  for (size_t i = 0; i < entries.size(); ++i) {
+    mirror::Object* entry =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[i]);
+    sorted_entries.push_back(entry);
+  }
   std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
 
   // Remove any uninteresting stuff from the list. The sort moved them all to the end.
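
The copy-then-sort path can no longer clone the vector wholesale: the sort comparator dereferences each object, so every entry must first be healed to a valid to-space pointer, one root at a time. A sketch of the shape (toy types as in the sketches above; entries assumed non-null):

    #include <algorithm>
    #include <vector>

    struct Obj { int size = 0; Obj* forward = nullptr; };

    Obj* ToSpace(Obj* ref) {
      return (ref != nullptr && ref->forward != nullptr) ? ref->forward : ref;
    }

    std::vector<Obj*> SortedSnapshot(std::vector<Obj*>& roots) {
      std::vector<Obj*> sorted;
      for (Obj* root : roots) {
        sorted.push_back(ToSpace(root));  // barrier per entry, not a memcpy
      }
      std::sort(sorted.begin(), sorted.end(),
                [](Obj* a, Obj* b) { return a->size < b->size; });
      return sorted;
    }
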
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 45309c9..1cd0999 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -39,19 +39,19 @@
   ReferenceTable(const char* name, size_t initial_size, size_t max_size);
   ~ReferenceTable();
 
-  void Add(mirror::Object* obj);
+  void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void Remove(mirror::Object* obj);
+  void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   size_t Size() const;
 
-  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void VisitRoots(RootCallback* visitor, void* arg, uint32_t tid, RootType root_type);
 
  private:
   typedef std::vector<mirror::Object*> Table;
-  static void Dump(std::ostream& os, const Table& entries)
+  static void Dump(std::ostream& os, Table& entries)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   friend class IndirectReferenceTable;  // For Dump.
 
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 7ce68c6..d691623 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -146,7 +146,8 @@
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     CHECK(!kMovingFields);
-    return reinterpret_cast<mirror::ArtField*>(fid);
+    mirror::ArtField* field = reinterpret_cast<mirror::ArtField*>(fid);
+    return ReadBarrier::BarrierForRoot<mirror::ArtField, kWithReadBarrier>(&field);
   }
 
   jfieldID EncodeField(mirror::ArtField* field) const
@@ -162,7 +163,8 @@
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     CHECK(!kMovingMethods);
-    return reinterpret_cast<mirror::ArtMethod*>(mid);
+    mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid);
+    return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method);
   }
 
   jmethodID EncodeMethod(mirror::ArtMethod* method) const
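
jfieldID and jmethodID are encoded raw ArtField*/ArtMethod* values, so a decoded ID is a root like any other. Note that the barrier here runs on a stack local: the opaque ID itself is not healed, only the returned pointer is guaranteed to be up to date, which is all callers need. The shape of the pattern (toy types once more):

    struct Obj { Obj* forward = nullptr; };

    Obj* RootBarrier(Obj** root) {
      if (*root != nullptr && (*root)->forward != nullptr) {
        *root = (*root)->forward;
      }
      return *root;
    }

    using ID = void*;  // stand-in for jfieldID/jmethodID

    Obj* DecodeId(ID id) {
      Obj* local = reinterpret_cast<Obj*>(id);  // raw decode, as before
      return RootBarrier(&local);  // heal only the local copy
    }
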
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f8f4a3..e5ae6d0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1298,9 +1298,7 @@
     }
   } else if (kind == kGlobal) {
     JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
-    // Strong global references do not need a read barrier.
-    result = vm->globals.SynchronizedGet<kWithoutReadBarrier>(
-        const_cast<Thread*>(this), &vm->globals_lock, ref);
+    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);