Merge "ART: Add unstarted-runtime functions"
diff --git a/runtime/atomic.cc b/runtime/atomic.cc
index e766a8d..d5ae570 100644
--- a/runtime/atomic.cc
+++ b/runtime/atomic.cc
@@ -28,7 +28,7 @@
 }
 
 void QuasiAtomic::Startup() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     gSwapMutexes = new std::vector<Mutex*>;
     for (size_t i = 0; i < kSwapMutexCount; ++i) {
       gSwapMutexes->push_back(new Mutex("QuasiAtomic stripe", kSwapMutexesLock));
@@ -37,7 +37,7 @@
 }
 
 void QuasiAtomic::Shutdown() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     STLDeleteElements(gSwapMutexes);
     delete gSwapMutexes;
   }
diff --git a/runtime/atomic.h b/runtime/atomic.h
index d4a7f37..e2a7259 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -22,6 +22,7 @@
 #include <limits>
 #include <vector>
 
+#include "arch/instruction_set.h"
 #include "base/logging.h"
 #include "base/macros.h"
 
@@ -44,14 +45,10 @@
 // quasiatomic operations that are performed on partially-overlapping
 // memory.
 class QuasiAtomic {
-#if defined(__mips__) && !defined(__LP64__)
-  static constexpr bool kNeedSwapMutexes = true;
-#elif defined(__mips__) && defined(__LP64__)
-  // TODO - mips64 still need this for Cas64 ???
-  static constexpr bool kNeedSwapMutexes = true;
-#else
-  static constexpr bool kNeedSwapMutexes = false;
-#endif
+  static constexpr bool NeedSwapMutexes(InstructionSet isa) {
+    // TODO: does mips64 still need this for Cas64?
+    return (isa == kMips) || (isa == kMips64);
+  }
 
  public:
   static void Startup();
@@ -60,7 +57,7 @@
 
   // Reads the 64-bit value at "addr" without tearing.
   static int64_t Read64(volatile const int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       int64_t value;
 #if defined(__LP64__)
       value = *addr;
@@ -96,7 +93,7 @@
 
   // Writes to the 64-bit value at "addr" without tearing.
   static void Write64(volatile int64_t* addr, int64_t value) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
 #if defined(__LP64__)
       *addr = value;
 #else
@@ -142,7 +139,7 @@
   // at some point during the execution of Cas64, *addr was not equal to
   // old_value.
   static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       return __sync_bool_compare_and_swap(addr, old_value, new_value);
     } else {
       return SwapMutexCas64(old_value, new_value, addr);
@@ -150,8 +147,8 @@
   }
 
   // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
-  static bool LongAtomicsUseMutexes() {
-    return kNeedSwapMutexes;
+  static bool LongAtomicsUseMutexes(InstructionSet isa) {
+    return NeedSwapMutexes(isa);
   }
 
   static void ThreadFenceAcquire() {
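Review note: for ISAs where `NeedSwapMutexes()` is true, `Read64`/`Write64`/`Cas64` fall back to the `SwapMutex*` helpers backed by the `gSwapMutexes` pool created in `Startup()`. A simplified sketch of how such mutex striping typically works — names and the hash are hypothetical, not ART's actual helpers:

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

// Hypothetical sketch: serialize 64-bit accesses through a pool of mutexes,
// choosing a stripe from the address so unrelated words rarely contend.
static constexpr size_t kSwapMutexCount = 32;
static std::vector<std::mutex> g_swap_mutexes(kSwapMutexCount);

static std::mutex& MutexForAddr(const volatile int64_t* addr) {
  // Drop alignment bits, then index into the pool.
  uintptr_t key = reinterpret_cast<uintptr_t>(addr) >> 3;
  return g_swap_mutexes[key % kSwapMutexCount];
}

int64_t SwapMutexRead64(const volatile int64_t* addr) {
  std::lock_guard<std::mutex> lock(MutexForAddr(addr));
  return *addr;
}

bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
  std::lock_guard<std::mutex> lock(MutexForAddr(addr));
  if (*addr == old_value) {
    *addr = new_value;
    return true;
  }
  return false;
}

int main() {
  volatile int64_t word = 7;
  SwapMutexCas64(7, 9, &word);
  return static_cast<int>(SwapMutexRead64(&word));  // 9
}
```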
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e72f2a2..293451c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -66,8 +66,8 @@
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
-  kTransactionLogLock,
   kMarkSweepMarkStackLock,
+  kTransactionLogLock,
   kJniWeakGlobalsLock,
   kReferenceQueueSoftReferencesLock,
   kReferenceQueuePhantomReferencesLock,
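Review note: the `LockLevel` reordering raises `kTransactionLogLock` above `kMarkSweepMarkStackLock`. ART's debug builds enforce (roughly) that a thread only acquires locks at a lower level than anything it already holds, so this move — presumably needed once transactional interpretation can trigger GC work such as reference processing — legalizes taking the mark-stack lock while the transaction log lock is held. A toy model of that kind of hierarchy check, not ART's `Mutex` implementation:

```cpp
#include <cassert>
#include <mutex>
#include <vector>

// Toy model (assumption: the rule is "acquire only strictly lower levels than
// anything already held"; the real checks live in ART's BaseMutex).
thread_local std::vector<int> tls_held_levels;

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}
  void Lock() {
    for (int held : tls_held_levels) {
      assert(level_ < held && "lock level violation: acquiring upward");
    }
    mu_.lock();
    tls_held_levels.push_back(level_);
  }
  void Unlock() {
    tls_held_levels.pop_back();
    mu_.unlock();
  }
 private:
  const int level_;
  std::mutex mu_;
};

int main() {
  LeveledMutex transaction_log_lock(/*level=*/11);  // now above the mark stack lock
  LeveledMutex mark_stack_lock(/*level=*/10);
  transaction_log_lock.Lock();
  mark_stack_lock.Lock();  // legal after the reordering; asserted before it
  mark_stack_lock.Unlock();
  transaction_log_lock.Unlock();
}
```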
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 0e175b8..b21f1ec 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -22,11 +22,13 @@
 #include "ScopedLocalRef.h"
 
 #include "art_method-inl.h"
+#include "base/casts.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "class_linker.h"
 #include "common_throws.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "gc/reference_processor.h"
 #include "handle_scope-inl.h"
 #include "interpreter/interpreter_common.h"
 #include "mirror/array-inl.h"
@@ -261,6 +263,25 @@
   }
 }
 
+// This is required for Enum(Set) code, as that uses reflection to inspect enum classes.
+void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Special managed code cut-out to allow method lookup in an unstarted runtime.
+  mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+  if (klass == nullptr) {
+    ThrowNullPointerExceptionForMethodAccess(shadow_frame->GetMethod(), InvokeType::kVirtual);
+    return;
+  }
+  mirror::String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+  mirror::ObjectArray<mirror::Class>* args =
+      shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<true>(self, klass, name, args));
+  } else {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<false>(self, klass, name, args));
+  }
+}
+
 void UnstartedRuntime::UnstartedClassGetEnclosingClass(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
   StackHandleScope<1> hs(self);
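Review note: the `IsActiveTransaction()` check followed by a `<true>`/`<false>` template call is the pattern this change applies throughout — transaction state is resolved once per operation and then flows as a compile-time parameter, so the non-transactional path compiles to a plain write with no per-field check. A minimal sketch of the idea (names hypothetical):

```cpp
#include <iostream>

// Resolve the runtime flag once, then let a template parameter specialize
// the write path.
template <bool kTransactionActive>
void SetField(int* field, int value) {
  if (kTransactionActive) {
    std::cout << "record old value " << *field << " for rollback\n";
  }
  *field = value;
}

void InterpreterWrite(bool transaction_active, int* field, int value) {
  if (transaction_active) {
    SetField<true>(field, value);   // logging path, used by dex2oat's <clinit> runs
  } else {
    SetField<false>(field, value);  // plain store, the common runtime path
  }
}

int main() {
  int f = 1;
  InterpreterWrite(/*transaction_active=*/true, &f, 2);
  std::cout << f << "\n";  // 2
}
```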
@@ -860,6 +881,155 @@
   result->SetL(string->ToCharArray(self));
 }
 
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
+void UnstartedRuntime::UnstartedReferenceGetReferent(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  mirror::Reference* const ref = down_cast<mirror::Reference*>(
+      shadow_frame->GetVRegReference(arg_offset));
+  if (ref == nullptr) {
+    AbortTransactionOrFail(self, "Reference.getReferent() with null object");
+    return;
+  }
+  mirror::Object* const referent =
+      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
+  result->SetL(referent);
+}
+
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue. We use a somewhat
+// conservative upper bound. We restrict the callers to SynchronousQueue and ConcurrentHashMap,
+// where we can predict the behavior (somewhat).
+// Note: this is required (instead of lazy initialization) as these classes are used in the static
+//       initialization of other classes, so will *use* the value.
+void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+  std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+  if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+    // SynchronousQueue really only separates between single- and multiprocessor case. Return
+    // 8 as a conservative upper approximation.
+    result->SetI(8);
+  } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+    // ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
+    // a good upper bound.
+    // TODO: Consider resetting in the zygote?
+    result->SetI(8);
+  } else {
+    // Not supported.
+    AbortTransactionOrFail(self, "Accessing availableProcessors not allowed");
+  }
+}
+
+// This allows accessing ConcurrentHashMap/SynchronousQueue.
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapLong(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  int64_t expectedValue = shadow_frame->GetVRegLong(arg_offset + 4);
+  int64_t newValue = shadow_frame->GetVRegLong(arg_offset + 6);
+
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistent64<true>(MemberOffset(offset),
+                                                                expectedValue,
+                                                                newValue);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistent64<false>(MemberOffset(offset),
+                                                                 expectedValue,
+                                                                 newValue);
+  }
+  result->SetZ(success ? 1 : 0);
+}
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* expected_value = shadow_frame->GetVRegReference(arg_offset + 4);
+  mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
+
+  // Must use non transactional mode.
+  if (kUseReadBarrier) {
+    // Need to make sure the reference stored in the field is a to-space one before attempting the
+    // CAS or the CAS could fail incorrectly.
+    mirror::HeapReference<mirror::Object>* field_addr =
+        reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+            reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+        obj,
+        MemberOffset(offset),
+        field_addr);
+  }
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<true>(MemberOffset(offset),
+                                                                    expected_value,
+                                                                    new_value);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
+                                                                     expected_value,
+                                                                     new_value);
+  }
+  result->SetZ(success ? 1 : 0);
+}
+
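Review note: the `kUseReadBarrier` block above deals with a concurrent-copying subtlety — the field may still hold a from-space address for an object the caller already knows by its to-space address, so a raw bit-comparison CAS would fail even though both references name the same object. The `Barrier` call with `kAlwaysUpdateField` rewrites the slot to to-space first. A toy model of the failure and the fix (addresses and names are illustrative only):

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Toy model: object A was copied by the GC; it has an old (from-space) and a
// new (to-space) address.
constexpr uint32_t kFromSpaceA = 0x1000;
constexpr uint32_t kToSpaceA = 0x2000;

uint32_t ToSpace(uint32_t ref) { return ref == kFromSpaceA ? kToSpaceA : ref; }

bool CasReference(std::atomic<uint32_t>& slot, uint32_t expected, uint32_t desired) {
  // Analogue of ReadBarrier::Barrier<..., kAlwaysUpdateField>: canonicalize the
  // slot to its to-space form so that bit equality means reference equality.
  uint32_t current = slot.load();
  uint32_t fixed = ToSpace(current);
  if (fixed != current) {
    slot.compare_exchange_strong(current, fixed);
  }
  return slot.compare_exchange_strong(expected, desired);
}

int main() {
  std::atomic<uint32_t> field{kFromSpaceA};  // slot not yet fixed up by the GC
  uint32_t expected = kToSpaceA;             // caller read A through a read barrier
  // Without the fix-up the CAS fails spuriously; with it, it succeeds.
  std::cout << CasReference(field, expected, 0x3000) << "\n";  // prints 1
}
```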
+void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* value = obj->GetFieldObjectVolatile<mirror::Object>(MemberOffset(offset));
+  result->SetL(value);
+}
+
+void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 4);
+  QuasiAtomic::ThreadFenceRelease();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+  } else {
+    obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+  }
+}
+
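Review note: `putOrderedObject` only promises store-store ordering — prior writes become visible no later than this write — which is why a release fence followed by a plain `SetFieldObject` suffices. Roughly equivalent in `std::atomic` terms (sketch):

```cpp
#include <atomic>

std::atomic<int> payload{0};
std::atomic<int> published{0};

// Writes before the fence cannot be reordered after the following store
// (release semantics without the cost of a volatile store).
void PutOrdered() {
  payload.store(42, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_release);  // QuasiAtomic::ThreadFenceRelease()
  published.store(1, std::memory_order_relaxed);        // the plain SetFieldObject
}

int main() { PutOrdered(); }
```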
 void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
     uint32_t* args, JValue* result) {
@@ -906,6 +1076,17 @@
   result->SetD(exp(value.GetD()));
 }
 
+void UnstartedRuntime::UnstartedJNIAtomicLongVMSupportsCS8(
+    Thread* self ATTRIBUTE_UNUSED,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args ATTRIBUTE_UNUSED,
+    JValue* result) {
+  result->SetZ(QuasiAtomic::LongAtomicsUseMutexes(Runtime::Current()->GetInstructionSet())
+                   ? 0
+                   : 1);
+}
+
 void UnstartedRuntime::UnstartedJNIClassGetNameNative(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
     uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
@@ -913,6 +1094,13 @@
   result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
 }
 
+void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble(
+    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+  uint64_t long_input = args[0] | (static_cast<uint64_t>(args[1]) << 32);
+  result->SetD(bit_cast<double>(long_input));
+}
+
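Review note: JNI-style unstarted handlers receive wide arguments as two 32-bit register halves; the code above reassembles them low-word-first and reinterprets the bits. The same computation standalone, with `std::memcpy` standing in for ART's `bit_cast`:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

double LongBitsToDouble(uint32_t lo, uint32_t hi) {
  uint64_t bits = lo | (static_cast<uint64_t>(hi) << 32);
  double value;
  std::memcpy(&value, &bits, sizeof(value));  // same effect as bit_cast<double>
  return value;
}

int main() {
  // 1.0 is IEEE-754 0x3FF0000000000000: low word 0, high word 0x3FF00000.
  std::cout << LongBitsToDouble(0x00000000u, 0x3FF00000u) << "\n";  // prints 1
}
```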
 void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
     Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
     mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 6d4d711..29f2197 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -24,6 +24,7 @@
   V(ClassClassForName, "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") \
   V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
   V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
+  V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
   V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
   V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
   V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
@@ -40,6 +41,8 @@
   V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
   V(MemoryPeekLong, "long libcore.io.Memory.peekLongNative(long)") \
   V(MemoryPeekByteArray, "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") \
+  V(ReferenceGetReferent, "java.lang.Object java.lang.ref.Reference.getReferent()") \
+  V(RuntimeAvailableProcessors, "int java.lang.Runtime.availableProcessors()") \
   V(SecurityGetSecurityPropertiesReader, "java.io.Reader java.security.Security.getSecurityPropertiesReader()") \
   V(StringGetCharsNoCheck, "void java.lang.String.getCharsNoCheck(int, int, char[], int)") \
   V(StringCharAt, "char java.lang.String.charAt(int)") \
@@ -47,7 +50,11 @@
   V(StringFactoryNewStringFromChars, "java.lang.String java.lang.StringFactory.newStringFromChars(int, int, char[])") \
   V(StringFactoryNewStringFromString, "java.lang.String java.lang.StringFactory.newStringFromString(java.lang.String)") \
   V(StringFastSubstring, "java.lang.String java.lang.String.fastSubstring(int, int)") \
-  V(StringToCharArray, "char[] java.lang.String.toCharArray()")
+  V(StringToCharArray, "char[] java.lang.String.toCharArray()") \
+  V(UnsafeCompareAndSwapLong, "boolean sun.misc.Unsafe.compareAndSwapLong(java.lang.Object, long, long, long)") \
+  V(UnsafeCompareAndSwapObject, "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") \
+  V(UnsafeGetObjectVolatile, "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") \
+  V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)")
 
 // Methods that are native.
 #define UNSTARTED_RUNTIME_JNI_LIST(V)           \
@@ -56,7 +63,9 @@
   V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
   V(MathLog, "double java.lang.Math.log(double)") \
   V(MathExp, "double java.lang.Math.exp(double)") \
+  V(AtomicLongVMSupportsCS8, "boolean java.util.concurrent.atomic.AtomicLong.VMSupportsCS8()") \
   V(ClassGetNameNative, "java.lang.String java.lang.Class.getNameNative()") \
+  V(DoubleLongBitsToDouble, "double java.lang.Double.longBitsToDouble(long)") \
   V(FloatFloatToRawIntBits, "int java.lang.Float.floatToRawIntBits(float)") \
   V(FloatIntBitsToFloat, "float java.lang.Float.intBitsToFloat(int)") \
   V(ObjectInternalClone, "java.lang.Object java.lang.Object.internalClone()") \
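Review note: these lists are X-macros — each entry is expanded several times with different definitions of `V` to generate handler declarations and the descriptor table the interpreter matches against, so adding an intrinsic like `UnsafeCompareAndSwapLong` is one line here plus the handler body. A compressed sketch of the mechanism (two entries only; everything except the `Unstarted##Name` pattern is illustrative):

```cpp
#include <cstdio>

// Two-entry stand-in for the real list.
#define UNSTARTED_LIST(V) \
  V(StringCharAt, "char java.lang.String.charAt(int)") \
  V(StringToCharArray, "char[] java.lang.String.toCharArray()")

// Expansion 1: one handler definition per entry (bodies stubbed out here).
#define DEFINE_HANDLER(Name, Descriptor) \
  static void Unstarted##Name() { std::printf("handling %s\n", Descriptor); }
UNSTARTED_LIST(DEFINE_HANDLER)
#undef DEFINE_HANDLER

// Expansion 2: a descriptor -> handler table for dispatch by pretty signature.
struct Entry { const char* descriptor; void (*handler)(); };
#define TABLE_ENTRY(Name, Descriptor) { Descriptor, &Unstarted##Name },
static const Entry kHandlers[] = { UNSTARTED_LIST(TABLE_ENTRY) };
#undef TABLE_ENTRY

int main() {
  for (const Entry& e : kHandlers) e.handler();
}
```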
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 91a9870..5a07dee 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,25 +21,36 @@
 namespace art {
 namespace mirror {
 
+template <bool kTransactionActive>
 bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
-  auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
-  SetArtMethod(method);
-  SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
-  SetFieldObject<false>(
+  auto* interface_method = method->GetInterfaceMethodIfProxy(
+      kTransactionActive
+          ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+          : sizeof(void*));
+  SetArtMethod<kTransactionActive>(method);
+  SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
+  SetFieldObject<kTransactionActive>(
       DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
-  SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
-  SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+  SetField32<kTransactionActive>(AccessFlagsOffset(), method->GetAccessFlags());
+  SetField32<kTransactionActive>(DexMethodIndexOffset(), method->GetDexMethodIndex());
   return true;
 }
 
+template bool AbstractMethod::CreateFromArtMethod<false>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<true>(ArtMethod* method);
+
 ArtMethod* AbstractMethod::GetArtMethod() {
   return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
 }
 
+template <bool kTransactionActive>
 void AbstractMethod::SetArtMethod(ArtMethod* method) {
-  SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
+  SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
 }
 
+template void AbstractMethod::SetArtMethod<false>(ArtMethod* method);
+template void AbstractMethod::SetArtMethod<true>(ArtMethod* method);
+
 mirror::Class* AbstractMethod::GetDeclaringClass() {
   return GetFieldObject<mirror::Class>(DeclaringClassOffset());
 }
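Review note: since `CreateFromArtMethod` and `SetArtMethod` are now templates whose definitions live in the .cc file, the explicit instantiations above are what keep callers in other translation units linking; only `<false>` and `<true>` can ever be requested. The pattern in isolation (hypothetical example):

```cpp
#include <iostream>

// ".cc equivalent": the template body is hidden from the header...
template <bool kTransactionActive>
void SetField64(long* field, long value) {
  if (kTransactionActive) {
    std::cout << "log old value " << *field << "\n";  // stand-in for transaction recording
  }
  *field = value;
}

// ...so both variants must be instantiated explicitly, or out-of-TU callers
// fail to link with "undefined reference to SetField64<true>(...)".
template void SetField64<false>(long*, long);
template void SetField64<true>(long*, long);

int main() {
  long f = 0;
  SetField64<true>(&f, 7);
  std::cout << f << "\n";  // 7
}
```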
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index dc084be..a39f94d 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,11 +34,13 @@
 class MANAGED AbstractMethod : public AccessibleObject {
  public:
   // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+  template <bool kTransactionActive = false>
   bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
   // Only used by the image writer.
+  template <bool kTransactionActive = false>
   void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9190e44..7900eac 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1054,5 +1054,89 @@
   return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
 }
 
+template <bool kTransactionActive>
+mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
+                                                 mirror::Class* klass,
+                                                 mirror::String* name,
+                                                 mirror::ObjectArray<mirror::Class>* args) {
+  // Covariant return types permit the class to define multiple
+  // methods with the same name and parameter types. Prefer to
+  // return a non-synthetic method in such situations. We may
+  // still return a synthetic method to handle situations like
+  // escalated visibility. We never return miranda methods that
+  // were synthesized by the runtime.
+  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+  StackHandleScope<3> hs(self);
+  auto h_method_name = hs.NewHandle(name);
+  if (UNLIKELY(h_method_name.Get() == nullptr)) {
+    ThrowNullPointerException("name == null");
+    return nullptr;
+  }
+  auto h_args = hs.NewHandle(args);
+  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+  ArtMethod* result = nullptr;
+  const size_t pointer_size = kTransactionActive
+                                  ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+                                  : sizeof(void*);
+  for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) {
+    auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+    // May cause thread suspension.
+    mirror::String* np_name = np_method->GetNameAsString(self);
+    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+      if (UNLIKELY(self->IsExceptionPending())) {
+        return nullptr;
+      }
+      continue;
+    }
+    auto modifiers = m.GetAccessFlags();
+    if ((modifiers & kSkipModifiers) == 0) {
+      return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+    }
+    if ((modifiers & kAccMiranda) == 0) {
+      result = &m;  // Remember as potential result if it's not a miranda method.
+    }
+  }
+  if (result == nullptr) {
+    for (auto& m : h_klass->GetDirectMethods(pointer_size)) {
+      auto modifiers = m.GetAccessFlags();
+      if ((modifiers & kAccConstructor) != 0) {
+        continue;
+      }
+      auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+      // May cause thread suspension.
+      mirror::String* np_name = np_method->GetNameAsString(self);
+      if (np_name == nullptr) {
+        self->AssertPendingException();
+        return nullptr;
+      }
+      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+        if (UNLIKELY(self->IsExceptionPending())) {
+          return nullptr;
+        }
+        continue;
+      }
+      if ((modifiers & kSkipModifiers) == 0) {
+        return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+      }
+      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+      result = &m;
+    }
+  }
+  return result != nullptr
+      ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result)
+      : nullptr;
+}
+
+template
+mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self,
+                                                        mirror::Class* klass,
+                                                        mirror::String* name,
+                                                        mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self,
+                                                       mirror::Class* klass,
+                                                       mirror::String* name,
+                                                       mirror::ObjectArray<mirror::Class>* args);
+
 }  // namespace mirror
 }  // namespace art
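Review note: two details of the hoisted lookup are worth flagging. It prefers a non-synthetic match but falls back to a synthetic one (covariant return types create such duplicates) and never returns runtime-synthesized miranda methods; and the pointer size switches to the image pointer size under a transaction because the boot image being compiled may not share the host's pointer width. The selection policy in miniature — flag values illustrative except `kAccSynthetic`, which is the standard `ACC_SYNTHETIC` 0x1000:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

constexpr uint32_t kAccSynthetic = 0x1000;  // standard ACC_SYNTHETIC
constexpr uint32_t kAccMiranda = 0x200000;  // runtime-internal; value illustrative

struct Method { std::string name; uint32_t access_flags; };

// Mirrors the loops above: a clean match wins immediately; a synthetic match is
// remembered as a fallback; miranda methods are never returned.
const Method* SelectDeclaredMethod(const std::vector<Method>& methods,
                                   const std::string& name) {
  const Method* fallback = nullptr;
  for (const Method& m : methods) {
    if (m.name != name) continue;
    if ((m.access_flags & (kAccSynthetic | kAccMiranda)) == 0) return &m;
    if ((m.access_flags & kAccMiranda) == 0) fallback = &m;
  }
  return fallback;
}

int main() {
  // Covariant return types: a synthetic bridge and the real method share a name.
  std::vector<Method> methods = {{"get", kAccSynthetic}, {"get", 0}};
  const Method* m = SelectDeclaredMethod(methods, "get");
  std::cout << ((m->access_flags == 0) ? "picked non-synthetic\n" : "picked bridge\n");
}
```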
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 6e3463c..7082c88 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -55,6 +55,7 @@
 class Constructor;
 class DexCache;
 class IfTable;
+class Method;
 
 // C++ mirror of java.lang.Class
 class MANAGED Class FINAL : public Object {
@@ -759,6 +760,13 @@
         size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  template <bool kTransactionActive = false>
+  static Method* GetDeclaredMethodInternal(Thread* self,
+                                           mirror::Class* klass,
+                                           mirror::String* name,
+                                           mirror::ObjectArray<mirror::Class>* args)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 85c52e9..97973e6 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,15 +51,19 @@
   array_class_ = GcRoot<Class>(nullptr);
 }
 
+template <bool kTransactionActive>
 Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
   DCHECK(!method->IsConstructor()) << PrettyMethod(method);
   auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
   if (LIKELY(ret != nullptr)) {
-    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
   }
   return ret;
 }
 
+template Method* Method::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+
 void Method::VisitRoots(RootVisitor* visitor) {
   static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
   array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 0c28e4f..12a72fe 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,6 +28,7 @@
 // C++ mirror of java.lang.reflect.Method.
 class MANAGED Method : public AbstractMethod {
  public:
+  template <bool kTransactionActive = false>
   static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b5d859b..bf24de5 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -371,70 +371,13 @@
 
 static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
                                                jobject name, jobjectArray args) {
-  // Covariant return types permit the class to define multiple
-  // methods with the same name and parameter types. Prefer to
-  // return a non-synthetic method in such situations. We may
-  // still return a synthetic method to handle situations like
-  // escalated visibility. We never return miranda methods that
-  // were synthesized by the runtime.
-  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
   ScopedFastNativeObjectAccess soa(env);
-  StackHandleScope<3> hs(soa.Self());
-  auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
-  if (UNLIKELY(h_method_name.Get() == nullptr)) {
-    ThrowNullPointerException("name == null");
-    return nullptr;
-  }
-  auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
-  Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
-  ArtMethod* result = nullptr;
-  for (auto& m : h_klass->GetDeclaredVirtualMethods(sizeof(void*))) {
-    auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-    // May cause thread suspension.
-    mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-      if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-        return nullptr;
-      }
-      continue;
-    }
-    auto modifiers = m.GetAccessFlags();
-    if ((modifiers & kSkipModifiers) == 0) {
-      return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-    }
-    if ((modifiers & kAccMiranda) == 0) {
-      result = &m;  // Remember as potential result if it's not a miranda method.
-    }
-  }
-  if (result == nullptr) {
-    for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
-      auto modifiers = m.GetAccessFlags();
-      if ((modifiers & kAccConstructor) != 0) {
-        continue;
-      }
-      auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-      // May cause thread suspension.
-      mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-      if (np_name == nullptr) {
-        soa.Self()->AssertPendingException();
-        return nullptr;
-      }
-      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-        if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-          return nullptr;
-        }
-        continue;
-      }
-      if ((modifiers & kSkipModifiers) == 0) {
-        return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-      }
-      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
-      result = &m;
-    }
-  }
-  return result != nullptr ?
-      soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
-      nullptr;
+  mirror::Method* result = mirror::Class::GetDeclaredMethodInternal(
+      soa.Self(),
+      DecodeClass(soa, javaThis),
+      soa.Decode<mirror::String*>(name),
+      soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+  return soa.AddLocalReference<jobject>(result);
 }
 
 static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index 04f0ba0..4d2ea67 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -16,13 +16,14 @@
 
 #include "java_util_concurrent_atomic_AtomicLong.h"
 
+#include "arch/instruction_set.h"
 #include "atomic.h"
 #include "jni_internal.h"
 
 namespace art {
 
 static jboolean AtomicLong_VMSupportsCS8(JNIEnv*, jclass) {
-  return QuasiAtomic::LongAtomicsUseMutexes() ? JNI_FALSE : JNI_TRUE;
+  return QuasiAtomic::LongAtomicsUseMutexes(kRuntimeISA) ? JNI_FALSE : JNI_TRUE;
 }
 
 static JNINativeMethod gMethods[] = {