Merge "Quick compiler: enable Arm64 compilation"
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index b3fac77..638c590 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -363,20 +363,27 @@
 INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
             RegLocation arg1, bool safepoint_pc)
 
+// TODO: This is a hack! Reshape the two macros into functions and move them to a better place.
+#define IsSameReg(r1, r2) \
+  (GetRegInfo(r1)->Master()->GetReg().GetReg() == GetRegInfo(r2)->Master()->GetReg().GetReg())
+#define TargetArgReg(arg, is_wide) \
+  (GetRegInfo(TargetReg(arg))->FindMatchingView( \
+     (is_wide) ? RegisterInfo::k64SoloStorageMask : RegisterInfo::k32SoloStorageMask)->GetReg())
+
 void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
-  if (arg1.GetReg() == TargetReg(kArg0).GetReg()) {
-    if (arg0.GetReg() == TargetReg(kArg1).GetReg()) {
+  if (IsSameReg(arg1, TargetReg(kArg0))) {
+    if (IsSameReg(arg0, TargetReg(kArg1))) {
       // Swap kArg0 and kArg1 with kArg2 as temp.
-      OpRegCopy(TargetReg(kArg2), arg1);
-      OpRegCopy(TargetReg(kArg0), arg0);
-      OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
+      OpRegCopy(TargetArgReg(kArg2, arg1.Is64Bit()), arg1);
+      OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+      OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2));
     } else {
-      OpRegCopy(TargetReg(kArg1), arg1);
-      OpRegCopy(TargetReg(kArg0), arg0);
+      OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
+      OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
     }
   } else {
-    OpRegCopy(TargetReg(kArg0), arg0);
-    OpRegCopy(TargetReg(kArg1), arg1);
+    OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+    OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
   }
 }
 
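The two macros above exist because, on Arm64, the same physical argument register exposes distinct 32-bit (w) and 64-bit (x) views, and a copy of a wide value has to go through the view matching its width. IsSameReg therefore compares master (physical) registers so that aliasing views are recognized before the swap through kArg2. A minimal standalone sketch of the idea; RegView, SameReg, and ArgView are illustrative names, not ART's API:

    #include <cassert>
    #include <cstdio>

    // Illustrative model: one physical register number can be viewed as a
    // 32-bit ("w") or 64-bit ("x") storage unit, as on Arm64.
    struct RegView {
      int phys_num;   // physical register number (the "master")
      bool is_wide;   // true: 64-bit view, false: 32-bit view
    };

    // Two views alias iff they share a physical register, regardless of
    // width -- the analogue of IsSameReg above.
    bool SameReg(const RegView& a, const RegView& b) {
      return a.phys_num == b.phys_num;
    }

    // Pick the view of a target argument register that matches the width
    // of the value being copied -- the analogue of TargetArgReg.
    RegView ArgView(int phys_num, bool is_wide) {
      return RegView{phys_num, is_wide};
    }

    int main() {
      RegView wide = ArgView(0, /*is_wide=*/true);     // x0 on Arm64
      RegView narrow = ArgView(0, /*is_wide=*/false);  // w0 on Arm64
      assert(SameReg(wide, narrow));  // same register, different views
      std::printf("copy through %c%d\n", wide.is_wide ? 'x' : 'w', wide.phys_num);
      return 0;
    }
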
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 3ff55ab..10f34d9 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -935,8 +935,7 @@
   field_.field_idx_delta_ = DecodeUnsignedLeb128(&ptr_pos_);
   field_.access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
   if (last_idx_ != 0 && field_.field_idx_delta_ == 0) {
-    LOG(WARNING) << "Duplicate field " << PrettyField(GetMemberIndex(), dex_file_)
-                 << " in " << dex_file_.GetLocation();
+    LOG(WARNING) << "Duplicate field in " << dex_file_.GetLocation();
   }
 }
 
@@ -945,8 +944,7 @@
   method_.access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
   method_.code_off_ = DecodeUnsignedLeb128(&ptr_pos_);
   if (last_idx_ != 0 && method_.method_idx_delta_ == 0) {
-    LOG(WARNING) << "Duplicate method " << PrettyMethod(GetMemberIndex(), dex_file_)
-                 << " in " << dex_file_.GetLocation();
+    LOG(WARNING) << "Duplicate method in " << dex_file_.GetLocation();
   }
 }
 
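The warning fires because class_data member indices are delta-encoded: each entry stores the difference from the previous index, so a zero delta on a non-first entry repeats an index, i.e. a duplicate member. Dropping PrettyField/PrettyMethod from the message is presumably defensive, since pretty-printing an index recovered from a malformed dex file can itself misbehave. A standalone sketch of the decoding; this is the canonical ULEB128 loop, the same shape as ART's DecodeUnsignedLeb128:

    #include <cstdint>
    #include <cstdio>

    // Canonical unsigned LEB128 decode, as used for dex member deltas.
    uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    int main() {
      const uint8_t encoded[] = {0x05, 0x00};  // deltas 5, then 0
      const uint8_t* p = encoded;
      uint32_t last_idx = 0;
      for (int i = 0; i < 2; ++i) {
        uint32_t delta = DecodeUnsignedLeb128(&p);
        if (i != 0 && delta == 0) {
          std::printf("duplicate member at index %u\n", last_idx);
        }
        last_idx += delta;
      }
      return 0;
    }
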
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index b787233..f561643 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -59,8 +59,7 @@
 
 // Make sure that the entry at "idx" is correctly paired with "iref".
 inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
-  const mirror::Object* obj = table_[idx];
-  IndirectRef checkRef = ToIndirectRef(obj, idx);
+  IndirectRef checkRef = ToIndirectRef(idx);
   if (UNLIKELY(checkRef != iref)) {
     LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what
                << " stale " << kind_ << " " << iref
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 98e1d21..ad798ed 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -137,13 +137,13 @@
       DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
     }
     UpdateSlotAdd(obj, pScan - table_);
-    result = ToIndirectRef(obj, pScan - table_);
+    result = ToIndirectRef(pScan - table_);
     *pScan = obj;
     segment_state_.parts.numHoles--;
   } else {
     // Add to the end.
     UpdateSlotAdd(obj, topIndex);
-    result = ToIndirectRef(obj, topIndex);
+    result = ToIndirectRef(topIndex);
     table_[topIndex++] = obj;
     segment_state_.parts.topIndex = topIndex;
   }
@@ -277,9 +277,6 @@
       // while the read barrier won't.
       entries.push_back(obj);
     } else {
-      // We need a read barrier if weak globals. Since this is for
-      // debugging where performance isn't top priority, we
-      // unconditionally enable the read barrier, which is conservative.
       obj = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
       entries.push_back(obj);
     }
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5b3ed68..b3a855d 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
 #include "mem_map.h"
 #include "object_callbacks.h"
 #include "offsets.h"
+#include "read_barrier.h"
 
 namespace art {
 namespace mirror {
@@ -215,6 +216,7 @@
   }
 
   mirror::Object** operator*() {
+    // This does not have a read barrier, as it is used to visit roots.
     return &table_[i_];
   }
 
@@ -298,6 +300,7 @@
     return segment_state_.parts.topIndex;
   }
 
+  // Note that IrtIterator does not have a read barrier, as it is used to visit roots.
   IrtIterator begin() {
     return IrtIterator(table_, 0, Capacity());
   }
@@ -333,7 +336,7 @@
    * The object pointer itself is subject to relocation in some GC
    * implementations, so we shouldn't really be using it here.
    */
-  IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
+  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
     DCHECK_LT(tableIndex, 65536U);
     uint32_t serialChunk = slot_data_[tableIndex].serial;
     uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
@@ -368,9 +371,8 @@
   std::unique_ptr<MemMap> table_mem_map_;
   // Mem map where we store the extended debugging info.
   std::unique_ptr<MemMap> slot_mem_map_;
-  // bottom of the stack. If a JNI weak global table, do not directly
-  // access the object references in this as they are weak roots. Use
-  // Get() that has a read barrier.
+  // bottom of the stack. Do not access the object references in this
+  // table directly, as they are roots; use Get(), which has a read barrier.
   mirror::Object** table_;
   /* bit mask, ORed into all irefs */
   IndirectRefKind kind_;
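
The cookie built by ToIndirectRef packs three fields: the kind in bits [1:0], the table index in bits [17:2] (hence the 65536 limit), and a per-slot serial number from bit 20 up, which is what lets CheckEntry detect a stale reference after a slot is reused. A sketch of that packing; the helper names are illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Mirror of the layout in ToIndirectRef: serial << 20 | index << 2 | kind.
    uintptr_t ToIref(uint32_t serial, uint32_t index, uint32_t kind) {
      assert(index < 65536u);
      return (static_cast<uintptr_t>(serial) << 20) | (index << 2) | kind;
    }

    uint32_t IrefIndex(uintptr_t iref) { return (iref >> 2) & 0xffffu; }
    uint32_t IrefKind(uintptr_t iref) { return iref & 0x3u; }

    int main() {
      uintptr_t iref = ToIref(/*serial=*/3, /*index=*/42, /*kind=*/2);
      std::printf("index=%u kind=%u\n", IrefIndex(iref), IrefKind(iref));
      // Reusing slot 42 bumps the serial, so the old iref no longer
      // matches -- exactly what CheckEntry catches.
      assert(iref != ToIref(/*serial=*/4, /*index=*/42, /*kind=*/2));
      return 0;
    }
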
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5606d47..513b409 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -359,8 +359,9 @@
         jni_on_load_result_(kPending) {
   }
 
-  mirror::Object* GetClassLoader() {
-    return class_loader_;
+  mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object** root = &class_loader_;
+    return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
   }
 
   std::string GetPath() {
@@ -3160,9 +3161,7 @@
   while (UNLIKELY(!allow_new_weak_globals_)) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
-  // The weak globals do need a read barrier as they are weak roots.
-  mirror::Object* obj = weak_globals_.Get<kWithReadBarrier>(ref);
-  return obj;
+  return weak_globals_.Get(ref);
 }
 
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 451d13c..ed5db4e 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -33,11 +33,15 @@
 
 class ReadBarrier {
  public:
+  // It's up to the implementation whether the given field gets updated,
+  // but the return value must always be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE static MirrorType* Barrier(
       mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // It's up to the implementation whether the given root gets updated,
+  // but the return value must always be an updated reference.
   template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
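
The contract spelled out in these comments is the load-bearing part: a root barrier must return the up-to-date reference, while healing the root slot itself is left to the implementation. A toy model under the assumption of a simple forwarding-table collector; nothing here is ART's implementation:

    #include <cstdio>
    #include <unordered_map>

    // Stand-in for a moving collector: objects that moved are recorded as
    // old address -> new address.
    static std::unordered_map<void*, void*> forwarding;

    // Sketch of the BarrierForRoot contract above: the return value is
    // always the up-to-date reference; writing it back to the root slot
    // ("healing") is an implementation choice.
    template <typename T>
    T* BarrierForRoot(T** root) {
      T* ref = *root;
      auto it = forwarding.find(ref);
      if (it == forwarding.end()) {
        return ref;  // object did not move
      }
      T* to_ref = static_cast<T*>(it->second);
      *root = to_ref;  // optional healing of the root slot
      return to_ref;
    }

    int main() {
      int old_obj = 1, new_obj = 2;
      forwarding[&old_obj] = &new_obj;
      int* root = &old_obj;
      int* ref = BarrierForRoot(&root);
      std::printf("ref=%d root healed: %s\n", *ref,
                  root == &new_obj ? "yes" : "no");
      return 0;
    }

This also explains why the DecodeField/DecodeMethod changes further down barrier a stack-local copy: the caller only needs the returned reference, not a healed slot.
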
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 11527fa..cd35863 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -24,6 +24,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/string-inl.h"
+#include "read_barrier.h"
 #include "thread.h"
 #include "utils.h"
 
@@ -51,7 +52,9 @@
 void ReferenceTable::Remove(mirror::Object* obj) {
   // We iterate backwards on the assumption that references are LIFO.
   for (int i = entries_.size() - 1; i >= 0; --i) {
-    if (entries_[i] == obj) {
+    mirror::Object* entry =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries_[i]);
+    if (entry == obj) {
       entries_.erase(entries_.begin() + i);
       return;
     }
@@ -140,12 +143,12 @@
   return entries_.size();
 }
 
-void ReferenceTable::Dump(std::ostream& os) const {
+void ReferenceTable::Dump(std::ostream& os) {
   os << name_ << " reference table dump:\n";
   Dump(os, entries_);
 }
 
-void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
+void ReferenceTable::Dump(std::ostream& os, Table& entries) {
   if (entries.empty()) {
     os << "  (empty)\n";
     return;
@@ -160,7 +163,8 @@
   }
   os << "  Last " << (count - first) << " entries (of " << count << "):\n";
   for (int idx = count - 1; idx >= first; --idx) {
-    mirror::Object* ref = entries[idx];
+    mirror::Object* ref =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[idx]);
     if (ref == NULL) {
       continue;
     }
@@ -194,7 +198,12 @@
   }
 
   // Make a copy of the table and sort it.
-  Table sorted_entries(entries.begin(), entries.end());
+  Table sorted_entries;
+  for (size_t i = 0; i < entries.size(); ++i) {
+    mirror::Object* entry =
+        ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[i]);
+    sorted_entries.push_back(entry);
+  }
   std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
 
   // Remove any uninteresting stuff from the list. The sort moved them all to the end.
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 45309c9..1cd0999 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -39,19 +39,19 @@
   ReferenceTable(const char* name, size_t initial_size, size_t max_size);
   ~ReferenceTable();
 
-  void Add(mirror::Object* obj);
+  void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void Remove(mirror::Object* obj);
+  void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   size_t Size() const;
 
-  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void VisitRoots(RootCallback* visitor, void* arg, uint32_t tid, RootType root_type);
 
  private:
   typedef std::vector<mirror::Object*> Table;
-  static void Dump(std::ostream& os, const Table& entries)
+  static void Dump(std::ostream& os, Table& entries)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   friend class IndirectReferenceTable;  // For Dump.
 
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 7ce68c6..d691623 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -146,7 +146,8 @@
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     CHECK(!kMovingFields);
-    return reinterpret_cast<mirror::ArtField*>(fid);
+    mirror::ArtField* field = reinterpret_cast<mirror::ArtField*>(fid);
+    return ReadBarrier::BarrierForRoot<mirror::ArtField, kWithReadBarrier>(&field);
   }
 
   jfieldID EncodeField(mirror::ArtField* field) const
@@ -162,7 +163,8 @@
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     CHECK(!kMovingMethods);
-    return reinterpret_cast<mirror::ArtMethod*>(mid);
+    mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid);
+    return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method);
   }
 
   jmethodID EncodeMethod(mirror::ArtMethod* method) const
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f8f4a3..e5ae6d0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1298,9 +1298,7 @@
     }
   } else if (kind == kGlobal) {
     JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
-    // Strong global references do not need a read barrier.
-    result = vm->globals.SynchronizedGet<kWithoutReadBarrier>(
-        const_cast<Thread*>(this), &vm->globals_lock, ref);
+    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c9c3bba..f8e75ea 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1219,6 +1219,12 @@
         break;
       case 'J':
       case 'D': {
+        if (cur_arg + 1 >= expected_args) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
+              << " args, found more (" << descriptor << ")";
+          return false;
+        }
+
         const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
         const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
         reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
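
The new bounds check matters because 'J' (long) and 'D' (double) occupy two adjacent registers: without it, a malformed signature whose last declared slot is the low half of a wide pair would make SetRegisterTypeWide write one register past the line, which is also why the DCHECK in register_line.cc below becomes vdst + 1. A small standalone sketch of the slot accounting; FitsInArgs is an illustrative helper, not the verifier's API:

    #include <cstdio>

    // Count the dex register slots a shorty's arguments occupy. 'J' (long)
    // and 'D' (double) take two slots each, so a wide type must have room
    // for both halves.
    bool FitsInArgs(const char* shorty, int expected_args) {
      int cur = 0;
      for (const char* p = shorty + 1; *p != '\0'; ++p) {  // skip return type
        bool wide = (*p == 'J' || *p == 'D');
        if (cur + (wide ? 1 : 0) >= expected_args) {
          return false;  // the low half would land on (or past) the last slot
        }
        cur += wide ? 2 : 1;
      }
      return true;
    }

    int main() {
      std::printf("%d\n", FitsInArgs("VIJ", 3));  // int + long = 3 slots: ok
      std::printf("%d\n", FitsInArgs("VIJ", 2));  // long has 1 slot left: bad
      return 0;
    }
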
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index d21f39b..556056c 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -55,7 +55,7 @@
 
 bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
                                        const RegType& new_type2) {
-  DCHECK_LT(vdst, num_regs_);
+  DCHECK_LT(vdst + 1, num_regs_);
   if (!new_type1.CheckWidePair(new_type2)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
         << new_type1 << "' '" << new_type2 << "'";